Diffstat (limited to 'lib')
-rw-r--r--lib/.gitignore17
-rw-r--r--lib/842/842_debugfs.h5
-rw-r--r--lib/Kconfig182
-rw-r--r--lib/Kconfig.debug2028
-rw-r--r--lib/Kconfig.kasan264
-rw-r--r--lib/Kconfig.kcsan257
-rw-r--r--lib/Kconfig.kfence99
-rw-r--r--lib/Kconfig.kgdb48
-rw-r--r--lib/Kconfig.kmsan63
-rw-r--r--lib/Kconfig.ubsan164
-rw-r--r--lib/Makefile225
-rw-r--r--lib/argv_split.c4
-rw-r--r--lib/asn1_decoder.c4
-rw-r--r--lib/asn1_encoder.c452
-rw-r--r--lib/assoc_array.c32
-rw-r--r--lib/atomic64.c52
-rw-r--r--lib/audit.c14
-rw-r--r--lib/base64.c103
-rw-r--r--lib/bch.c122
-rw-r--r--lib/bitfield_kunit.c (renamed from lib/test_bitfield.c)90
-rw-r--r--lib/bitmap-str.c510
-rw-r--r--lib/bitmap.c834
-rw-r--r--lib/bootconfig-data.S10
-rw-r--r--lib/bootconfig.c985
-rw-r--r--lib/bsearch.c22
-rw-r--r--lib/btree.c31
-rw-r--r--lib/bug.c95
-rw-r--r--lib/buildid.c191
-rw-r--r--lib/bust_spinlocks.c3
-rw-r--r--lib/checksum.c31
-rw-r--r--lib/checksum_kunit.c641
-rw-r--r--lib/closure.c211
-rw-r--r--lib/clz_ctz.c32
-rw-r--r--lib/cmdline.c58
-rw-r--r--lib/cmdline_kunit.c156
-rw-r--r--lib/compat_audit.c15
-rw-r--r--lib/cpu_rmap.c59
-rw-r--r--lib/cpumask.c200
-rw-r--r--lib/cpumask_kunit.c155
-rw-r--r--lib/crc-ccitt.c55
-rw-r--r--lib/crc-itu-t.c2
-rw-r--r--lib/crc-t10dif.c75
-rw-r--r--lib/crc32.c18
-rw-r--r--lib/crc32test.c6
-rw-r--r--lib/crc64-rocksoft.c126
-rw-r--r--lib/crc64.c33
-rw-r--r--lib/crc7.c2
-rw-r--r--lib/crc8.c2
-rw-r--r--lib/crypto/Kconfig141
-rw-r--r--lib/crypto/Makefile57
-rw-r--r--lib/crypto/aes.c356
-rw-r--r--lib/crypto/aesgcm.c740
-rw-r--r--lib/crypto/arc4.c74
-rw-r--r--lib/crypto/blake2s-generic.c110
-rw-r--r--lib/crypto/blake2s-selftest.c651
-rw-r--r--lib/crypto/blake2s.c71
-rw-r--r--lib/crypto/chacha.c (renamed from lib/chacha.c)25
-rw-r--r--lib/crypto/chacha20poly1305-selftest.c9082
-rw-r--r--lib/crypto/chacha20poly1305.c373
-rw-r--r--lib/crypto/curve25519-fiat32.c864
-rw-r--r--lib/crypto/curve25519-generic.c24
-rw-r--r--lib/crypto/curve25519-hacl64.c786
-rw-r--r--lib/crypto/curve25519-selftest.c1321
-rw-r--r--lib/crypto/curve25519.c33
-rw-r--r--lib/crypto/des.c902
-rw-r--r--lib/crypto/gf128mul.c436
-rw-r--r--lib/crypto/libchacha.c35
-rw-r--r--lib/crypto/memneq.c173
-rw-r--r--lib/crypto/mpi/Makefile (renamed from lib/mpi/Makefile)7
-rw-r--r--lib/crypto/mpi/ec.c1509
-rw-r--r--lib/crypto/mpi/generic_mpih-add1.c (renamed from lib/mpi/generic_mpih-add1.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-lshift.c (renamed from lib/mpi/generic_mpih-lshift.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul1.c (renamed from lib/mpi/generic_mpih-mul1.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul2.c (renamed from lib/mpi/generic_mpih-mul2.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul3.c (renamed from lib/mpi/generic_mpih-mul3.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-rshift.c (renamed from lib/mpi/generic_mpih-rshift.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-sub1.c (renamed from lib/mpi/generic_mpih-sub1.c)0
-rw-r--r--lib/crypto/mpi/longlong.h (renamed from lib/mpi/longlong.h)92
-rw-r--r--lib/crypto/mpi/mpi-add.c155
-rw-r--r--lib/crypto/mpi/mpi-bit.c308
-rw-r--r--lib/crypto/mpi/mpi-cmp.c (renamed from lib/mpi/mpi-cmp.c)54
-rw-r--r--lib/crypto/mpi/mpi-div.c234
-rw-r--r--lib/crypto/mpi/mpi-inline.h (renamed from lib/mpi/mpi-inline.h)0
-rw-r--r--lib/crypto/mpi/mpi-internal.h (renamed from lib/mpi/mpi-internal.h)53
-rw-r--r--lib/crypto/mpi/mpi-inv.c143
-rw-r--r--lib/crypto/mpi/mpi-mod.c157
-rw-r--r--lib/crypto/mpi/mpi-mul.c92
-rw-r--r--lib/crypto/mpi/mpi-pow.c (renamed from lib/mpi/mpi-pow.c)0
-rw-r--r--lib/crypto/mpi/mpi-sub-ui.c78
-rw-r--r--lib/crypto/mpi/mpicoder.c (renamed from lib/mpi/mpicoder.c)345
-rw-r--r--lib/crypto/mpi/mpih-cmp.c (renamed from lib/mpi/mpih-cmp.c)0
-rw-r--r--lib/crypto/mpi/mpih-div.c517
-rw-r--r--lib/crypto/mpi/mpih-mul.c (renamed from lib/mpi/mpih-mul.c)25
-rw-r--r--lib/crypto/mpi/mpiutil.c330
-rw-r--r--lib/crypto/poly1305-donna32.c205
-rw-r--r--lib/crypto/poly1305-donna64.c184
-rw-r--r--lib/crypto/poly1305.c78
-rw-r--r--lib/crypto/sha1.c (renamed from lib/sha1.c)122
-rw-r--r--lib/crypto/sha256.c168
-rw-r--r--lib/crypto/utils.c88
-rw-r--r--lib/debug_info.c3
-rw-r--r--lib/debugobjects.c693
-rw-r--r--lib/dec_and_lock.c31
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/decompress_bunzip2.c10
-rw-r--r--lib/decompress_inflate.c15
-rw-r--r--lib/decompress_unlz4.c8
-rw-r--r--lib/decompress_unlzma.c8
-rw-r--r--lib/decompress_unlzo.c3
-rw-r--r--lib/decompress_unxz.c17
-rw-r--r--lib/decompress_unzstd.c352
-rw-r--r--lib/devmem_is_allowed.c28
-rw-r--r--lib/devres.c216
-rw-r--r--lib/dhry.h358
-rw-r--r--lib/dhry_1.c290
-rw-r--r--lib/dhry_2.c175
-rw-r--r--lib/dhry_run.c87
-rw-r--r--lib/digsig.c4
-rw-r--r--lib/dim/Makefile7
-rw-r--r--lib/dim/dim.c84
-rw-r--r--lib/dim/net_dim.c247
-rw-r--r--lib/dim/rdma_dim.c109
-rw-r--r--lib/dump_stack.c61
-rw-r--r--lib/dynamic_debug.c805
-rw-r--r--lib/dynamic_queue_limits.c4
-rw-r--r--lib/earlycpio.c8
-rw-r--r--lib/errname.c225
-rw-r--r--lib/error-inject.c35
-rw-r--r--lib/errseq.c1
-rw-r--r--lib/extable.c6
-rw-r--r--lib/fault-inject-usercopy.c39
-rw-r--r--lib/fault-inject.c294
-rw-r--r--lib/fdt_addresses.c2
-rw-r--r--lib/find_bit.c310
-rw-r--r--lib/find_bit_benchmark.c43
-rw-r--r--lib/flex_proportions.c45
-rw-r--r--lib/fonts/Kconfig9
-rw-r--r--lib/fonts/Makefile1
-rw-r--r--lib/fonts/font_10x18.c10
-rw-r--r--lib/fonts/font_6x10.c10
-rw-r--r--lib/fonts/font_6x11.c10
-rw-r--r--lib/fonts/font_6x8.c2577
-rw-r--r--lib/fonts/font_7x14.c10
-rw-r--r--lib/fonts/font_8x16.c10
-rw-r--r--lib/fonts/font_8x8.c10
-rw-r--r--lib/fonts/font_acorn_8x8.c10
-rw-r--r--lib/fonts/font_mini_4x6.c9
-rw-r--r--lib/fonts/font_pearl_8x8.c12
-rw-r--r--lib/fonts/font_sun12x22.c10
-rw-r--r--lib/fonts/font_sun8x16.c8
-rw-r--r--lib/fonts/font_ter16x32.c16
-rw-r--r--lib/fonts/fonts.c106
-rw-r--r--lib/fortify_kunit.c332
-rw-r--r--lib/fw_table.c227
-rw-r--r--lib/gen_crc64table.c51
-rw-r--r--lib/genalloc.c191
-rw-r--r--lib/generic-radix-tree.c112
-rw-r--r--lib/glob.c4
-rw-r--r--lib/group_cpus.c439
-rw-r--r--lib/hashtable_test.c317
-rw-r--r--lib/hexdump.c63
-rw-r--r--lib/idr.c48
-rw-r--r--lib/interval_tree.c132
-rw-r--r--lib/iomap.c74
-rw-r--r--lib/ioremap.c231
-rw-r--r--lib/iov_iter.c2610
-rw-r--r--lib/irq_poll.c8
-rw-r--r--lib/is_signed_type_kunit.c49
-rw-r--r--lib/jedec_ddr_data.c132
-rw-r--r--lib/kasprintf.c2
-rw-r--r--lib/kfifo.c5
-rw-r--r--lib/kobject.c210
-rw-r--r--lib/kobject_uevent.c15
-rw-r--r--lib/kstrtox.c51
-rw-r--r--lib/kstrtox.h2
-rw-r--r--lib/kunit/.kunitconfig3
-rw-r--r--lib/kunit/Kconfig73
-rw-r--r--lib/kunit/Makefile27
-rw-r--r--lib/kunit/assert.c272
-rw-r--r--lib/kunit/attributes.c474
-rw-r--r--lib/kunit/debugfs.c227
-rw-r--r--lib/kunit/debugfs.h30
-rw-r--r--lib/kunit/device-impl.h19
-rw-r--r--lib/kunit/device.c195
-rw-r--r--lib/kunit/executor.c416
-rw-r--r--lib/kunit/executor_test.c293
-rw-r--r--lib/kunit/hooks-impl.h31
-rw-r--r--lib/kunit/hooks.c21
-rw-r--r--lib/kunit/kunit-example-test.c377
-rw-r--r--lib/kunit/kunit-test.c831
-rw-r--r--lib/kunit/resource.c178
-rw-r--r--lib/kunit/static_stub.c123
-rw-r--r--lib/kunit/string-stream-test.c544
-rw-r--r--lib/kunit/string-stream.c207
-rw-r--r--lib/kunit/string-stream.h62
-rw-r--r--lib/kunit/test.c938
-rw-r--r--lib/kunit/try-catch-impl.h27
-rw-r--r--lib/kunit/try-catch.c98
-rw-r--r--lib/kunit_iov_iter.c777
-rw-r--r--lib/libcrc32c.c8
-rw-r--r--lib/linear_ranges.c276
-rw-r--r--lib/list-test.c1502
-rw-r--r--lib/list_debug.c36
-rw-r--r--lib/list_sort.c23
-rw-r--r--lib/livepatch/Makefile9
-rw-r--r--lib/livepatch/test_klp_callbacks_busy.c45
-rw-r--r--lib/livepatch/test_klp_shadow_vars.c281
-rw-r--r--lib/livepatch/test_klp_state.c162
-rw-r--r--lib/livepatch/test_klp_state2.c191
-rw-r--r--lib/livepatch/test_klp_state3.c5
-rw-r--r--lib/llist.c44
-rw-r--r--lib/locking-selftest.c1027
-rw-r--r--lib/lockref.c35
-rw-r--r--lib/logic_iomem.c321
-rw-r--r--lib/logic_pio.c40
-rw-r--r--lib/lru_cache.c66
-rw-r--r--lib/lwq.c158
-rw-r--r--lib/lz4/lz4_compress.c4
-rw-r--r--lib/lz4/lz4_decompress.c41
-rw-r--r--lib/lz4/lz4defs.h13
-rw-r--r--lib/lz4/lz4hc_compress.c3
-rw-r--r--lib/lzo/lzo1x_compress.c35
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c2
-rw-r--r--lib/maple_tree.c7564
-rw-r--r--lib/math/Kconfig9
-rw-r--r--lib/math/Makefile5
-rw-r--r--lib/math/div64.c61
-rw-r--r--lib/math/int_log.c133
-rw-r--r--lib/math/int_pow.c2
-rw-r--r--lib/math/int_sqrt.c3
-rw-r--r--lib/math/prime_numbers.c10
-rw-r--r--lib/math/rational-test.c56
-rw-r--r--lib/math/rational.c74
-rw-r--r--lib/math/reciprocal_div.c10
-rw-r--r--lib/math/test_div64.c249
-rw-r--r--lib/memcpy_kunit.c569
-rw-r--r--lib/memregion.c19
-rw-r--r--lib/mpi/mpi-bit.c56
-rw-r--r--lib/mpi/mpih-div.c223
-rw-r--r--lib/mpi/mpiutil.c126
-rw-r--r--lib/net_utils.c6
-rw-r--r--lib/nlattr.c433
-rw-r--r--lib/nmi_backtrace.c23
-rw-r--r--lib/nodemask.c31
-rw-r--r--lib/notifier-error-inject.c18
-rw-r--r--lib/objagg.c17
-rw-r--r--lib/objpool.c297
-rw-r--r--lib/oid_registry.c27
-rw-r--r--lib/once.c41
-rw-r--r--lib/overflow_kunit.c1148
-rw-r--r--lib/packing.c20
-rw-r--r--lib/parman.c5
-rw-r--r--lib/parser.c123
-rw-r--r--lib/pci_iomap.c43
-rw-r--r--lib/percpu-refcount.c161
-rw-r--r--lib/percpu_counter.c205
-rw-r--r--lib/pldmfw/Makefile2
-rw-r--r--lib/pldmfw/pldmfw.c878
-rw-r--r--lib/pldmfw/pldmfw_private.h238
-rw-r--r--lib/polynomial.c108
-rw-r--r--lib/radix-tree.c49
-rw-r--r--lib/radix-tree.h8
-rw-r--r--lib/raid6/.gitignore1
-rw-r--r--lib/raid6/Makefile107
-rw-r--r--lib/raid6/algos.c141
-rw-r--r--lib/raid6/avx2.c12
-rw-r--r--lib/raid6/avx512.c6
-rw-r--r--lib/raid6/int.uc9
-rw-r--r--lib/raid6/loongarch.h38
-rw-r--r--lib/raid6/loongarch_simd.c422
-rw-r--r--lib/raid6/mktables.c4
-rw-r--r--lib/raid6/neon.h22
-rw-r--r--lib/raid6/neon.uc1
-rw-r--r--lib/raid6/recov.c1
-rw-r--r--lib/raid6/recov_avx2.c6
-rw-r--r--lib/raid6/recov_loongarch_simd.c513
-rw-r--r--lib/raid6/recov_neon.c8
-rw-r--r--lib/raid6/recov_neon_inner.c1
-rw-r--r--lib/raid6/recov_ssse3.c6
-rw-r--r--lib/raid6/s390vx.uc7
-rw-r--r--lib/raid6/test/.gitignore3
-rw-r--r--lib/raid6/test/Makefile71
-rw-r--r--lib/raid6/test/test.c1
-rw-r--r--lib/raid6/unroll.awk2
-rw-r--r--lib/raid6/vpermxor.uc2
-rw-r--r--lib/random32.c201
-rw-r--r--lib/ratelimit.c12
-rw-r--r--lib/rbtree.c48
-rw-r--r--lib/rbtree_test.c37
-rw-r--r--lib/rcuref.c281
-rw-r--r--lib/reed_solomon/Makefile2
-rw-r--r--lib/reed_solomon/decode_rs.c97
-rw-r--r--lib/reed_solomon/reed_solomon.c12
-rw-r--r--lib/reed_solomon/test_rslib.c518
-rw-r--r--lib/ref_tracker.c275
-rw-r--r--lib/refcount.c257
-rw-r--r--lib/rhashtable.c70
-rw-r--r--lib/sbitmap.c566
-rw-r--r--lib/scatterlist.c555
-rw-r--r--lib/seq_buf.c164
-rw-r--r--lib/sg_pool.c56
-rw-r--r--lib/sg_split.c12
-rw-r--r--lib/sha256.c279
-rw-r--r--lib/show_mem.c49
-rw-r--r--lib/siphash.c85
-rw-r--r--lib/siphash_kunit.c (renamed from lib/test_siphash.c)172
-rw-r--r--lib/slub_kunit.c190
-rw-r--r--lib/smp_processor_id.c21
-rw-r--r--lib/sort.c69
-rw-r--r--lib/stackdepot.c902
-rw-r--r--lib/stackinit_kunit.c461
-rw-r--r--lib/strcat_kunit.c104
-rw-r--r--lib/string.c419
-rw-r--r--lib/string_helpers.c538
-rw-r--r--lib/strncpy_from_user.c48
-rw-r--r--lib/strnlen_user.c33
-rw-r--r--lib/strscpy_kunit.c142
-rw-r--r--lib/syscall.c17
-rw-r--r--lib/test-string_helpers.c240
-rw-r--r--lib/test_bitmap.c1032
-rw-r--r--lib/test_bitops.c111
-rw-r--r--lib/test_bits.c75
-rw-r--r--lib/test_blackhole_dev.c100
-rw-r--r--lib/test_bpf.c9125
-rw-r--r--lib/test_dynamic_debug.c165
-rw-r--r--lib/test_firmware.c744
-rw-r--r--lib/test_fortify/read_overflow-memchr.c5
-rw-r--r--lib/test_fortify/read_overflow-memchr_inv.c5
-rw-r--r--lib/test_fortify/read_overflow-memcmp.c5
-rw-r--r--lib/test_fortify/read_overflow-memscan.c5
-rw-r--r--lib/test_fortify/read_overflow2-memcmp.c5
-rw-r--r--lib/test_fortify/read_overflow2-memcpy.c5
-rw-r--r--lib/test_fortify/read_overflow2-memmove.c5
-rw-r--r--lib/test_fortify/read_overflow2_field-memcpy.c5
-rw-r--r--lib/test_fortify/read_overflow2_field-memmove.c5
-rw-r--r--lib/test_fortify/test_fortify.h35
-rw-r--r--lib/test_fortify/write_overflow-memcpy.c5
-rw-r--r--lib/test_fortify/write_overflow-memmove.c5
-rw-r--r--lib/test_fortify/write_overflow-memset.c5
-rw-r--r--lib/test_fortify/write_overflow-strcpy-lit.c5
-rw-r--r--lib/test_fortify/write_overflow-strcpy.c5
-rw-r--r--lib/test_fortify/write_overflow-strncpy-src.c5
-rw-r--r--lib/test_fortify/write_overflow-strncpy.c5
-rw-r--r--lib/test_fortify/write_overflow-strscpy.c5
-rw-r--r--lib/test_fortify/write_overflow_field-memcpy.c5
-rw-r--r--lib/test_fortify/write_overflow_field-memmove.c5
-rw-r--r--lib/test_fortify/write_overflow_field-memset.c5
-rw-r--r--lib/test_fprobe.c275
-rw-r--r--lib/test_fpu.c89
-rw-r--r--lib/test_free_pages.c47
-rw-r--r--lib/test_hash.c259
-rw-r--r--lib/test_hexdump.c10
-rw-r--r--lib/test_hmm.c1553
-rw-r--r--lib/test_hmm_uapi.h77
-rw-r--r--lib/test_ida.c42
-rw-r--r--lib/test_kasan.c670
-rw-r--r--lib/test_kmod.c60
-rw-r--r--lib/test_kprobes.c403
-rw-r--r--lib/test_linear_ranges.c219
-rw-r--r--lib/test_list_sort.c130
-rw-r--r--lib/test_lockup.c620
-rw-r--r--lib/test_maple_tree.c3905
-rw-r--r--lib/test_meminit.c439
-rw-r--r--lib/test_min_heap.c194
-rw-r--r--lib/test_objagg.c6
-rw-r--r--lib/test_objpool.c690
-rw-r--r--lib/test_overflow.c614
-rw-r--r--lib/test_printf.c275
-rw-r--r--lib/test_ref_tracker.c115
-rw-r--r--lib/test_rhashtable.c80
-rw-r--r--lib/test_scanf.c813
-rw-r--r--lib/test_sort.c40
-rw-r--r--lib/test_stackinit.c387
-rw-r--r--lib/test_string.c115
-rw-r--r--lib/test_strscpy.c150
-rw-r--r--lib/test_sysctl.c195
-rw-r--r--lib/test_ubsan.c101
-rw-r--r--lib/test_user_copy.c155
-rw-r--r--lib/test_vmalloc.c253
-rw-r--r--lib/test_xarray.c278
-rw-r--r--lib/textsearch.c4
-rw-r--r--lib/timerqueue.c40
-rw-r--r--lib/trace_readwrite.c47
-rw-r--r--lib/ts_bm.c51
-rw-r--r--lib/ts_fsm.c4
-rw-r--r--lib/ts_kmp.c2
-rw-r--r--lib/ubsan.c275
-rw-r--r--lib/ubsan.h51
-rw-r--r--lib/ucs2_string.c52
-rw-r--r--lib/usercopy.c73
-rw-r--r--lib/uuid.c12
-rw-r--r--lib/vdso/Kconfig33
-rw-r--r--lib/vdso/Makefile17
-rw-r--r--lib/vdso/gettimeofday.c441
-rw-r--r--lib/vsprintf.c691
-rw-r--r--lib/win_minmax.c2
-rw-r--r--lib/xarray.c323
-rw-r--r--lib/xxhash.c2
-rw-r--r--lib/xz/Kconfig20
-rw-r--r--lib/xz/xz_crc32.c2
-rw-r--r--lib/xz/xz_dec_bcj.c4
-rw-r--r--lib/xz/xz_dec_lzma2.c197
-rw-r--r--lib/xz/xz_dec_stream.c22
-rw-r--r--lib/xz/xz_dec_syms.c9
-rw-r--r--lib/xz/xz_lzma2.h2
-rw-r--r--lib/xz/xz_private.h3
-rw-r--r--lib/xz/xz_stream.h2
-rw-r--r--lib/zlib_deflate/deflate.c104
-rw-r--r--lib/zlib_deflate/deflate_syms.c1
-rw-r--r--lib/zlib_deflate/deftree.c54
-rw-r--r--lib/zlib_deflate/defutil.h136
-rw-r--r--lib/zlib_dfltcc/Makefile11
-rw-r--r--lib/zlib_dfltcc/dfltcc.c40
-rw-r--r--lib/zlib_dfltcc/dfltcc.h124
-rw-r--r--lib/zlib_dfltcc/dfltcc_deflate.c313
-rw-r--r--lib/zlib_dfltcc/dfltcc_deflate.h21
-rw-r--r--lib/zlib_dfltcc/dfltcc_inflate.c154
-rw-r--r--lib/zlib_dfltcc/dfltcc_inflate.h37
-rw-r--r--lib/zlib_dfltcc/dfltcc_util.h103
-rw-r--r--lib/zlib_inflate/inffast.c102
-rw-r--r--lib/zlib_inflate/inflate.c56
-rw-r--r--lib/zlib_inflate/inflate.h8
-rw-r--r--lib/zlib_inflate/infutil.h18
-rw-r--r--lib/zstd/Makefile45
-rw-r--r--lib/zstd/bitstream.h379
-rw-r--r--lib/zstd/common/bitstream.h446
-rw-r--r--lib/zstd/common/compiler.h184
-rw-r--r--lib/zstd/common/cpu.h194
-rw-r--r--lib/zstd/common/debug.c24
-rw-r--r--lib/zstd/common/debug.h101
-rw-r--r--lib/zstd/common/entropy_common.c357
-rw-r--r--lib/zstd/common/error_private.c56
-rw-r--r--lib/zstd/common/error_private.h145
-rw-r--r--lib/zstd/common/fse.h711
-rw-r--r--lib/zstd/common/fse_decompress.c390
-rw-r--r--lib/zstd/common/huf.h358
-rw-r--r--lib/zstd/common/mem.h261
-rw-r--r--lib/zstd/common/portability_macros.h93
-rw-r--r--lib/zstd/common/zstd_common.c83
-rw-r--r--lib/zstd/common/zstd_deps.h107
-rw-r--r--lib/zstd/common/zstd_internal.h443
-rw-r--r--lib/zstd/compress.c3485
-rw-r--r--lib/zstd/compress/clevels.h132
-rw-r--r--lib/zstd/compress/fse_compress.c668
-rw-r--r--lib/zstd/compress/hist.c165
-rw-r--r--lib/zstd/compress/hist.h75
-rw-r--r--lib/zstd/compress/huf_compress.c1335
-rw-r--r--lib/zstd/compress/zstd_compress.c6127
-rw-r--r--lib/zstd/compress/zstd_compress_internal.h1399
-rw-r--r--lib/zstd/compress/zstd_compress_literals.c159
-rw-r--r--lib/zstd/compress/zstd_compress_literals.h31
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.c442
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.h54
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.c573
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.h32
-rw-r--r--lib/zstd/compress/zstd_cwksp.h595
-rw-r--r--lib/zstd/compress/zstd_double_fast.c696
-rw-r--r--lib/zstd/compress/zstd_double_fast.h32
-rw-r--r--lib/zstd/compress/zstd_fast.c675
-rw-r--r--lib/zstd/compress/zstd_fast.h31
-rw-r--r--lib/zstd/compress/zstd_lazy.c2102
-rw-r--r--lib/zstd/compress/zstd_lazy.h119
-rw-r--r--lib/zstd/compress/zstd_ldm.c724
-rw-r--r--lib/zstd/compress/zstd_ldm.h111
-rw-r--r--lib/zstd/compress/zstd_ldm_geartab.h106
-rw-r--r--lib/zstd/compress/zstd_opt.c1446
-rw-r--r--lib/zstd/compress/zstd_opt.h50
-rw-r--r--lib/zstd/decompress.c2531
-rw-r--r--lib/zstd/decompress/huf_decompress.c1740
-rw-r--r--lib/zstd/decompress/zstd_ddict.c241
-rw-r--r--lib/zstd/decompress/zstd_ddict.h44
-rw-r--r--lib/zstd/decompress/zstd_decompress.c2150
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.c2072
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.h68
-rw-r--r--lib/zstd/decompress/zstd_decompress_internal.h228
-rw-r--r--lib/zstd/decompress_sources.h34
-rw-r--r--lib/zstd/entropy_common.c243
-rw-r--r--lib/zstd/error_private.h53
-rw-r--r--lib/zstd/fse.h575
-rw-r--r--lib/zstd/fse_compress.c795
-rw-r--r--lib/zstd/fse_decompress.c332
-rw-r--r--lib/zstd/huf.h212
-rw-r--r--lib/zstd/huf_compress.c772
-rw-r--r--lib/zstd/huf_decompress.c960
-rw-r--r--lib/zstd/mem.h151
-rw-r--r--lib/zstd/zstd_common.c75
-rw-r--r--lib/zstd/zstd_common_module.c32
-rw-r--r--lib/zstd/zstd_compress_module.c164
-rw-r--r--lib/zstd/zstd_decompress_module.c105
-rw-r--r--lib/zstd/zstd_internal.h263
-rw-r--r--lib/zstd/zstd_opt.h1014
491 files changed, 120097 insertions, 23575 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index f2a39c9e5485..54596b634ecb 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,8 +1,9 @@
-#
-# Generated files
-#
-gen_crc32table
-gen_crc64table
-crc32table.h
-crc64table.h
-oid_registry_data.c
+# SPDX-License-Identifier: GPL-2.0-only
+/crc32table.h
+/crc64table.h
+/default.bconf
+/gen_crc32table
+/gen_crc64table
+/oid_registry_data.c
+/test_fortify.log
+/test_fortify/*.log
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
return -ENODEV;
sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
- if (IS_ERR(sw842_debugfs_root))
- return PTR_ERR(sw842_debugfs_root);
for (i = 0; i < ARRAY_SIZE(template_count); i++) {
char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
static void __exit sw842_debugfs_remove(void)
{
- if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
- debugfs_remove_recursive(sw842_debugfs_root);
+ debugfs_remove_recursive(sw842_debugfs_root);
}
#endif
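The 842_debugfs.h hunks above drop the error checks around the debugfs calls, relying on the debugfs convention that return values need not be checked: the value returned by debugfs_create_dir() can be passed straight into other debugfs_* helpers, and debugfs_remove_recursive() silently accepts NULL and error pointers. A minimal sketch of that simplified idiom follows; the names example_root and example_counter are hypothetical and not taken from the patch.

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *example_root;	/* hypothetical directory handle */
static u64 example_counter;		/* hypothetical value exposed via debugfs */

static int __init example_debugfs_init(void)
{
	/* No IS_ERR()/NULL check needed: an error pointer may be passed on. */
	example_root = debugfs_create_dir("example", NULL);
	debugfs_create_u64("counter", 0444, example_root, &example_counter);
	return 0;
}

static void __exit example_debugfs_exit(void)
{
	/* Safe even if example_root is NULL or an error pointer. */
	debugfs_remove_recursive(example_root);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_DESCRIPTION("debugfs usage sketch");
MODULE_LICENSE("GPL");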
diff --git a/lib/Kconfig b/lib/Kconfig
index 90623a0e1942..5ddda7c2ed9b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,8 +19,12 @@ config RAID6_PQ_BENCHMARK
Benchmark all available RAID6 PQ functions on init and choose the
fastest one.
+config LINEAR_RANGES
+ tristate
+
config PACKING
bool "Generic bitfield packing and unpacking"
+ select BITREVERSE
default n
help
This option provides the packing() helper function, which permits
@@ -42,21 +46,23 @@ config BITREVERSE
config HAVE_ARCH_BITREVERSE
bool
default n
- depends on BITREVERSE
help
This option enables the use of hardware bit-reversal instructions on
architectures which support such operations.
-config GENERIC_STRNCPY_FROM_USER
+config ARCH_HAS_STRNCPY_FROM_USER
bool
-config GENERIC_STRNLEN_USER
+config ARCH_HAS_STRNLEN_USER
bool
-config GENERIC_NET_UTILS
- bool
+config GENERIC_STRNCPY_FROM_USER
+ def_bool !ARCH_HAS_STRNCPY_FROM_USER
+
+config GENERIC_STRNLEN_USER
+ def_bool !ARCH_HAS_STRNLEN_USER
-config GENERIC_FIND_FIRST_BIT
+config GENERIC_NET_UTILS
bool
source "lib/math/Kconfig"
@@ -80,9 +86,13 @@ config ARCH_USE_CMPXCHG_LOCKREF
config ARCH_HAS_FAST_MULTIPLIER
bool
+config ARCH_USE_SYM_ANNOTATIONS
+ bool
+
config INDIRECT_PIO
bool "Access I/O in non-MMIO mode"
depends on ARM64
+ depends on HAS_IOPORT
help
On some platforms where no separate I/O space exists, there are I/O
hosts which can not be accessed in MMIO mode. Using the logical PIO
@@ -96,6 +106,29 @@ config INDIRECT_PIO
When in doubt, say N.
+config INDIRECT_IOMEM
+ bool
+ help
+ This is selected by other options/architectures to provide the
+ emulated iomem accessors.
+
+config INDIRECT_IOMEM_FALLBACK
+ bool
+ depends on INDIRECT_IOMEM
+ help
+ If INDIRECT_IOMEM is selected, this enables falling back to plain
+ mmio accesses when the IO memory address is not a registered
+ emulated region.
+
+config TRACE_MMIO_ACCESS
+ bool "Register read/write tracing"
+ depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
+ help
+ Create tracepoints for MMIO read/write operations. These trace events
+ can be used for logging all MMIO read/write operations.
+
+source "lib/crypto/Kconfig"
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -121,6 +154,15 @@ config CRC_T10DIF
kernel tree needs to calculate CRC checks for use with the
SCSI data integrity subsystem.
+config CRC64_ROCKSOFT
+ tristate "CRC calculation for the Rocksoft model CRC64"
+ select CRC64
+ select CRYPTO
+ select CRYPTO_CRC64_ROCKSOFT
+ help
+ This option provides a CRC64 API to a registered crypto driver.
+ This is used with the block layer's data integrity subsystem.
+
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
help
@@ -278,6 +320,13 @@ config ZLIB_DEFLATE
tristate
select BITREVERSE
+config ZLIB_DFLTCC
+ def_bool y
+ depends on S390
+ prompt "Enable s390x DEFLATE CONVERSION CALL support for kernel zlib"
+ help
+ Enable s390x hardware support for zlib in the kernel.
+
config LZO_COMPRESS
tristate
@@ -293,12 +342,16 @@ config LZ4HC_COMPRESS
config LZ4_DECOMPRESS
tristate
-config ZSTD_COMPRESS
+config ZSTD_COMMON
select XXHASH
tristate
+config ZSTD_COMPRESS
+ select ZSTD_COMMON
+ tristate
+
config ZSTD_DECOMPRESS
- select XXHASH
+ select ZSTD_COMMON
tristate
source "lib/xz/Kconfig"
@@ -329,6 +382,10 @@ config DECOMPRESS_LZ4
select LZ4_DECOMPRESS
tristate
+config DECOMPRESS_ZSTD
+ select ZSTD_DECOMPRESS
+ tristate
+
#
# Generic allocator support is selected if needed
#
@@ -358,6 +415,7 @@ config REED_SOLOMON_DEC16
#
config BCH
tristate
+ select BITREVERSE
config BCH_CONST_PARAMS
bool
@@ -420,10 +478,14 @@ config INTERVAL_TREE
See:
- Documentation/rbtree.txt
+ Documentation/core-api/rbtree.rst
for more information.
+config INTERVAL_TREE_SPAN_ITER
+ bool
+ depends on INTERVAL_TREE
+
config XARRAY_MULTI
bool
help
@@ -444,12 +506,17 @@ config ASSOCIATIVE_ARRAY
for more information.
+config CLOSURES
+ bool
+
config HAS_IOMEM
bool
depends on !NO_IOMEM
- select GENERIC_IO
default y
+config HAS_IOPORT
+ bool
+
config HAS_IOPORT_MAP
bool
depends on HAS_IOMEM && !NO_IOPORT_MAP
@@ -474,6 +541,15 @@ config CPUMASK_OFFSTACK
them on the stack. This is a bit more expensive, but avoids
stack overflow.
+config FORCE_NR_CPUS
+ bool "Set number of CPUs at compile time"
+ depends on SMP && EXPERT && !COMPILE_TEST
+ help
+ Say Y if you have NR_CPUS set to the actual number of possible CPUs
+ in your system rather than to a default value. This lets the core
+ code rely on the compile-time value and optimize kernel routines
+ better.
+
config CPU_RMAP
bool
depends on SMP
@@ -531,14 +607,6 @@ config LRU_CACHE
config CLZ_TAB
bool
-config DDR
- bool "JEDEC DDR data"
- help
- Data from JEDEC specs for DDR SDRAM memories,
- particularly the AC timing parameters and addressing
- information. This data is useful for drivers handling
- DDR SDRAM controllers.
-
config IRQ_POLL
bool "IRQ polling library"
help
@@ -562,6 +630,13 @@ config SIGNATURE
Digital signature verification. Currently only RSA is supported.
Implementation is done using GnuPG MPI library
+config DIMLIB
+ bool
+ help
+ Dynamic Interrupt Moderation library.
+ Implements an algorithm for dynamically changing CQ moderation values
+ according to run time performance.
+
#
# libfdt files, only selected if needed.
#
@@ -574,7 +649,12 @@ config OID_REGISTRY
Enable fast lookup object identifier registry.
config UCS2_STRING
- tristate
+ tristate
+
+#
+# generic vdso
+#
+source "lib/vdso/Kconfig"
source "lib/fonts/Kconfig"
@@ -602,6 +682,15 @@ config ARCH_NO_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config MEMREGION
+ bool
+
+config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+ bool
+
+config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+ bool
+
# use memcpy to implement user copies for nommu architectures
config UACCESS_MEMCPY
bool
@@ -609,7 +698,12 @@ config UACCESS_MEMCPY
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
-config ARCH_HAS_UACCESS_MCSAFE
+# arch has a concept of a recoverable synchronous exception due to a
+# memory-read error like x86 machine-check or ARM data-abort, and
+# implements copy_mc_to_{user,kernel} to abort and report
+# 'bytes-transferred' if that exception fires when accessing the source
+# buffer.
+config ARCH_HAS_COPY_MC
bool
# Temporary. Goes away when all archs are cleaned up
@@ -619,6 +713,25 @@ config ARCH_STACKWALK
config STACKDEPOT
bool
select STACKTRACE
+ help
+ Stack depot: stack trace storage that avoids duplication
+
+config STACKDEPOT_ALWAYS_INIT
+ bool
+ select STACKDEPOT
+ help
+ Always initialize stack depot during early boot
+
+config STACKDEPOT_MAX_FRAMES
+ int "Maximum number of frames in trace saved in stack depot"
+ range 1 256
+ default 64
+ depends on STACKDEPOT
+
+config REF_TRACKER
+ bool
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
config SBITMAP
bool
@@ -626,11 +739,19 @@ config SBITMAP
config PARMAN
tristate "parman" if COMPILE_TEST
-config STRING_SELFTEST
- tristate "Test string functions"
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
+
+config LWQ_TEST
+ bool "Boot-time test for lwq queuing"
+ help
+ Run boot-time test of light-weight queuing.
endmenu
+config GENERIC_IOREMAP
+ bool
+
config GENERIC_LIB_ASHLDI3
bool
@@ -649,5 +770,18 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
-config OBJAGG
- tristate "objagg" if COMPILE_TEST
+config GENERIC_LIB_DEVMEM_IS_ALLOWED
+ bool
+
+config PLDMFW
+ bool
+ default n
+
+config ASN1_ENCODER
+ tristate
+
+config POLYNOMIAL
+ tristate
+
+config FIRMWARE_TABLE
+ bool
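The ARCH_HAS_COPY_MC comment added above describes copy_mc_to_{user,kernel}() as helpers that stop at a machine-check or data-abort style read error and report how far they got. A hedged sketch of how a caller might consume that "bytes not copied" return value; the function name read_possibly_poisoned() and the -EIO policy are illustrative assumptions, not part of this patch.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative helper: copy from a source buffer that may be poisoned. */
static int read_possibly_poisoned(void *dst, const void *src, size_t len)
{
	unsigned long rem;

	/* copy_mc_to_kernel() returns the number of bytes NOT copied. */
	rem = copy_mc_to_kernel(dst, src, len);
	if (rem)
		return -EIO;	/* assumed policy: treat a partial copy as failure */
	return 0;
}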
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cbdfae379896..ef36b829ae1f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -35,6 +35,17 @@ config PRINTK_CALLER
no option to enable/disable at the kernel command line parameter or
sysfs interface.
+config STACKTRACE_BUILD_ID
+ bool "Show build ID information in stacktraces"
+ depends on PRINTK
+ help
+ Selecting this option adds build ID information for symbols in
+ stacktraces printed with the printk format '%p[SR]b'.
+
+ This option is intended for distros where debuginfo is not easily
+ accessible but can be downloaded given the build ID of the vmlinux or
+ kernel module where the function is located.
+
config CONSOLE_LOGLEVEL_DEFAULT
int "Default console loglevel (1-15)"
range 1 15
@@ -98,7 +109,8 @@ config DYNAMIC_DEBUG
bool "Enable dynamic printk() support"
default n
depends on PRINTK
- depends on DEBUG_FS
+ depends on (DEBUG_FS || PROC_FS)
+ select DYNAMIC_DEBUG_CORE
help
Compiles debug level messages into the kernel, which would not
@@ -116,8 +128,9 @@ config DYNAMIC_DEBUG
Usage:
Dynamic debugging is controlled via the 'dynamic_debug/control' file,
- which is contained in the 'debugfs' filesystem. Thus, the debugfs
- filesystem must first be mounted before making use of this feature.
+ which is contained in the 'debugfs' filesystem or procfs.
+ Thus, the debugfs or procfs filesystem must first be mounted before
+ making use of this feature.
We refer the control file as: <debugfs>/dynamic_debug/control. This
file contains a list of the debug statements that can be enabled. The
format for each line of the file is:
@@ -128,8 +141,8 @@ config DYNAMIC_DEBUG
lineno : line number of the debug statement
module : module that contains the debug statement
function : function that contains the debug statement
- flags : '=p' means the line is turned 'on' for printing
- format : the format used for the debug statement
+ flags : '=p' means the line is turned 'on' for printing
+ format : the format used for the debug statement
From a live system:
@@ -164,26 +177,133 @@ config DYNAMIC_DEBUG
See Documentation/admin-guide/dynamic-debug-howto.rst for additional
information.
+config DYNAMIC_DEBUG_CORE
+ bool "Enable core function of dynamic debug support"
+ depends on PRINTK
+ depends on (DEBUG_FS || PROC_FS)
+ help
+ Enable the core support for dynamic debug. This is useful when you
+ want to tie dynamic debug to individual kernel modules by defining
+ DYNAMIC_DEBUG_MODULE for each of them, especially on embedded
+ systems where kernel image size is a concern.
+
+config SYMBOLIC_ERRNAME
+ bool "Support symbolic error names in printf"
+ default y if PRINTK
+ help
+ If you say Y here, the kernel's printf implementation will
+ be able to print symbolic error names such as ENOSPC instead
+ of the number 28. It makes the kernel image slightly larger
+ (about 3KB), but can make the kernel logs easier to read.
+
+config DEBUG_BUGVERBOSE
+ bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
+ depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
+ default y
+ help
+ Say Y here to make BUG() panics output the file name and line number
+ of the BUG call as well as the EIP and oops trace. This aids
+ debugging but costs about 70-100K of memory.
+
endmenu # "printk and dmesg options"
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+config DEBUG_MISC
+ bool "Miscellaneous debug code"
+ default DEBUG_KERNEL
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you need to enable miscellaneous debug code that should
+ be under a more specific debug option but isn't.
+
menu "Compile-time checks and compiler options"
config DEBUG_INFO
- bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL && !COMPILE_TEST
+ bool
help
- If you say Y here the resulting kernel image will include
- debugging info resulting in a larger kernel image.
+ A kernel debug info option other than "None" has been selected
+ in the "Debug information" choice below, indicating that debug
+ information will be generated for build targets.
+
+# Clang generates .uleb128 with label differences for DWARF v5, a feature that
+# older binutils ports do not support when utilizing RISC-V style linker
+# relaxation: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_ULEB128
+ def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
+
+choice
+ prompt "Debug information"
+ depends on DEBUG_KERNEL
+ help
+ Selecting something other than "None" results in a kernel image
+ that will include debugging info resulting in a larger kernel image.
This adds debug symbols to the kernel and modules (gcc -g), and
is needed if you intend to use kernel crashdump or binary object
tools like crash, kgdb, LKCD, gdb, etc on the kernel.
- Say Y here only if you plan to debug the kernel.
- If unsure, say N.
+ Choose which version of DWARF debug info to emit. If unsure,
+ select "Toolchain default".
+
+config DEBUG_INFO_NONE
+ bool "Disable debug information"
+ help
+ Do not build the kernel with debugging information, which will
+ result in a faster and smaller build.
+
+config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ bool "Rely on the toolchain's implicit default DWARF version"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
+ help
+ The implicit default version of DWARF debug info produced by a
+ toolchain changes over time.
+
+ This can break consumers of the debug info that haven't upgraded to
+ support newer revisions, and prevent testing newer versions, but
+ those should be less common scenarios.
+
+config DEBUG_INFO_DWARF4
+ bool "Generate DWARF Version 4 debuginfo"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)
+ help
+ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
+ if using clang without clang's integrated assembler, and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+ config select this.
+
+config DEBUG_INFO_DWARF5
+ bool "Generate DWARF Version 5 debuginfo"
+ select DEBUG_INFO
+ depends on !ARCH_HAS_BROKEN_DWARF5
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
+ help
+ Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+ 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
+ draft features until 7.0), and gdb 8.0+.
+
+ Changes to the structure of debug info in Version 5 allow for around
+ 15-18% savings in resulting image and debug info section sizes as
+ compared to DWARF Version 4. DWARF Version 5 standardizes previous
+ extensions such as accelerators for symbol indexing and the format
+ for fission (.dwo/.dwp) files. Users may not want to select this
+ config if they rely on tooling that has not yet been updated to
+ support DWARF Version 5.
+
+endchoice # "Debug information"
+
+if DEBUG_INFO
config DEBUG_INFO_REDUCED
bool "Reduce debugging information"
- depends on DEBUG_INFO
help
If you say Y here gcc is instructed to generate less debugging
information for structure types. This means that tools that
@@ -194,10 +314,54 @@ config DEBUG_INFO_REDUCED
DEBUG_INFO build and compile times are reduced too.
Only works with newer gcc versions.
+choice
+ prompt "Compressed Debug information"
+ help
+ Compress the resulting debug info. Results in smaller debug info sections,
+ but requires that consumers are able to decompress the results.
+
+ If unsure, choose DEBUG_INFO_COMPRESSED_NONE.
+
+config DEBUG_INFO_COMPRESSED_NONE
+ bool "Don't compress debug information"
+ help
+ Don't compress debug info sections.
+
+config DEBUG_INFO_COMPRESSED_ZLIB
+ bool "Compress debugging information with zlib"
+ depends on $(cc-option,-gz=zlib)
+ depends on $(ld-option,--compress-debug-sections=zlib)
+ help
+ Compress the debug information using zlib. Requires GCC 5.0+ or Clang
+ 5.0+, binutils 2.26+, and zlib.
+
+ Users of dpkg-deb via scripts/package/builddeb may find an increase in
+ size of their debug .deb packages with this config set, due to the
+ debug info being compressed with zlib, then the object files being
+ recompressed with a different compression scheme. But this is still
+ preferable to setting $KDEB_COMPRESS to "none" which would be even
+ larger.
+
+config DEBUG_INFO_COMPRESSED_ZSTD
+ bool "Compress debugging information with zstd"
+ depends on $(cc-option,-gz=zstd)
+ depends on $(ld-option,--compress-debug-sections=zstd)
+ help
+ Compress the debug information using zstd. This may provide better
+ compression than zlib, for about the same time costs, but requires newer
+ toolchain support. Requires GCC 13.0+ or Clang 16.0+, binutils 2.40+, and
+ zstd.
+
+endchoice # "Compressed Debug information"
+
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
depends on $(cc-option,-gsplit-dwarf)
+ # RISC-V linker relaxation + -gsplit-dwarf has issues with LLVM and GCC
+ # prior to 12.x:
+ # https://github.com/llvm/llvm-project/issues/56642
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99090
+ depends on !RISCV || GCC_VERSION >= 120000
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -210,27 +374,57 @@ config DEBUG_INFO_SPLIT
to know about the .dwo files and include them.
Incompatible with older versions of ccache.
-config DEBUG_INFO_DWARF4
- bool "Generate dwarf4 debuginfo"
- depends on DEBUG_INFO
- depends on $(cc-option,-gdwarf-4)
- help
- Generate dwarf4 debug info. This requires recent versions
- of gcc and gdb. It makes the debug information larger.
- But it significantly improves the success of resolving
- variables in gdb on optimized code.
-
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
- depends on DEBUG_INFO
+ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ depends on BPF_SYSCALL
+ depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ # pahole uses elfutils, which does not have support for Hexagon relocations
+ depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
DWARF type info into equivalent deduplicated BTF type info.
+config PAHOLE_HAS_SPLIT_BTF
+ def_bool PAHOLE_VERSION >= 119
+
+config PAHOLE_HAS_BTF_TAG
+ def_bool PAHOLE_VERSION >= 123
+ depends on CC_IS_CLANG
+ help
+ Decide whether pahole emits btf_tag attributes (btf_type_tag and
+ btf_decl_tag) or not. Currently only clang compiler implements
+ these attributes, so make the config depend on CC_IS_CLANG.
+
+config PAHOLE_HAS_LANG_EXCLUDE
+ def_bool PAHOLE_VERSION >= 124
+ help
+ Support for the --lang_exclude flag which makes pahole exclude
+ compilation units from the supplied language. Used in Kbuild to
+ omit Rust CUs which are not supported in version 1.24 of pahole,
+ otherwise it would emit malformed kernel and module binaries when
+ using DEBUG_INFO_BTF_MODULES.
+
+config DEBUG_INFO_BTF_MODULES
+ def_bool y
+ depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ help
+ Generate compact split BTF type information for kernel modules.
+
+config MODULE_ALLOW_BTF_MISMATCH
+ bool "Allow loading modules with non-matching BTF type info"
+ depends on DEBUG_INFO_BTF_MODULES
+ help
+ For modules whose split BTF does not match vmlinux, load without
+ BTF rather than refusing to load. The default behavior with
+ module BTF enabled is to reject modules with such mismatches;
+ this option will still load module BTF where possible but ignore
+ it when a mismatch is found.
+
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
- depends on DEBUG_INFO
help
This creates the required links to GDB helper scripts in the
build directory. If you load vmlinux into gdb, the helper
@@ -239,26 +433,22 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
-config ENABLE_MUST_CHECK
- bool "Enable __must_check logic"
- default y
- help
- Enable the __must_check logic in the kernel build. Disable this to
- suppress the "warning: ignoring return value of 'foo', declared with
- attribute warn_unused_result" messages.
+endif # DEBUG_INFO
config FRAME_WARN
- int "Warn for stack frames larger than (needs gcc 4.4)"
+ int "Warn for stack frames larger than"
range 0 8192
+ default 0 if KMSAN
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1280 if (!64BIT && PARISC)
- default 1024 if (!64BIT && !PARISC)
+ default 2048 if PARISC
+ default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
+ default 1024 if !64BIT
default 2048 if 64BIT
help
- Tell gcc to warn at build time for stack frames larger than this.
+ Tell the compiler to warn at build time for stack frames larger than this.
Setting this too low will cause a lot of warnings.
Setting it to 0 disables the warning.
- Requires gcc 4.4
config STRIP_ASM_SYMS
bool "Strip assembler-generated symbols during link"
@@ -269,72 +459,28 @@ config STRIP_ASM_SYMS
get_wchan() and suchlike.
config READABLE_ASM
- bool "Generate readable assembler code"
- depends on DEBUG_KERNEL
- help
- Disable some compiler optimizations that tend to generate human unreadable
- assembler output. This may make the kernel slightly slower, but it helps
- to keep kernel developers who have to stare a lot at assembler listings
- sane.
-
-config UNUSED_SYMBOLS
- bool "Enable unused/obsolete exported symbols"
- default y if X86
- help
- Unused but exported symbols make the kernel needlessly bigger. For
- that reason most of these unused exports will soon be removed. This
- option is provided temporarily to provide a transition period in case
- some external kernel module needs one of these symbols anyway. If you
- encounter such a case in your module, consider if you are actually
- using the right API. (rationale: since nobody in the kernel is using
- this in a module, there is a pretty good chance it's actually the
- wrong interface to use). If you really need the symbol, please send a
- mail to the linux kernel mailing list mentioning the symbol and why
- you really need it, and what the merge plan to the mainline kernel for
- your module is.
-
-config DEBUG_FS
- bool "Debug Filesystem"
+ bool "Generate readable assembler code"
+ depends on DEBUG_KERNEL
+ depends on CC_IS_GCC
help
- debugfs is a virtual file system that kernel developers use to put
- debugging files into. Enable this option to be able to read and
- write to these files.
-
- For detailed documentation on the debugfs API, see
- Documentation/filesystems/.
-
- If unsure, say N.
+ Disable some compiler optimizations that tend to generate human unreadable
+ assembler output. This may make the kernel slightly slower, but it helps
+ to keep kernel developers who have to stare a lot at assembler listings
+ sane.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
depends on !UML
help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
-
- If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
-
-config OPTIMIZE_INLINING
- bool "Allow compiler to uninline functions marked 'inline'"
- help
- This option determines if the kernel forces gcc to inline the functions
- developers have marked 'inline'. Doing so takes away freedom from gcc to
- do what it thinks is best, which is desirable for the gcc 3.x series of
- compilers. The gcc 4.x series have a rewritten inlining algorithm and
- enabling this option will generate a smaller kernel there. Hopefully
- this algorithm is so good that allowing gcc 4.x and above to make the
- decision will become the default in the future. Until then this option
- is there to test gcc for this.
-
- If unsure, say N.
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
+ depends on CC_IS_GCC
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
@@ -346,23 +492,13 @@ config DEBUG_SECTION_MISMATCH
which results in the code/data being placed in specific sections.
The section mismatch analysis is always performed after a full
kernel build, and enabling this option causes the following
- additional steps to occur:
+ additional step to occur:
- Add the option -fno-inline-functions-called-once to gcc commands.
When inlining a function annotated with __init in a non-init
function, we would lose the section information and thus
the analysis would not catch the illegal reference.
This option tells gcc to inline less (but it does result in
a larger kernel).
- - Run the section mismatch analysis for each module/built-in.a file.
- When we run the section mismatch analysis on vmlinux.o, we
- lose valuable information about where the mismatch was
- introduced.
- Running the analysis for each module/built-in.a file
- tells where the mismatch happens much closer to the
- source. The drawback is that the same mismatch is
- reported at least twice.
- - Enable verbose reporting from modpost in order to help resolve
- the section mismatches that are reported.
config SECTION_MISMATCH_WARN_ONLY
bool "Make section mismatch errors non-fatal"
@@ -373,6 +509,19 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
+config DEBUG_FORCE_FUNCTION_ALIGN_64B
+ bool "Force all function address 64B aligned"
+ depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || RISCV || S390)
+ select FUNCTION_ALIGNMENT_64B
+ help
+ There are cases where a commit in one subsystem changes the function
+ address alignment of other subsystems and causes an unexpected
+ performance change (regression or improvement). Enabling this option
+ helps verify whether such a change is caused by function alignment,
+ while slightly increasing kernel size and affecting icache usage.
+
+ It is mainly for debug and performance tuning use.
+
#
# Select this config option from the architecture Kconfig, if it
# is preferred to always offer frame pointers as a config
@@ -390,20 +539,36 @@ config FRAME_POINTER
larger and slower, but it gives very useful debugging information
in case of kernel bugs. (precise oopses/stacktraces/warnings)
+config OBJTOOL
+ bool
+
config STACK_VALIDATION
bool "Compile-time stack metadata validation"
- depends on HAVE_STACK_VALIDATION
+ depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
+ select OBJTOOL
default n
help
- Add compile-time checks to validate stack metadata, including frame
- pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure
- that runtime stack traces are more reliable.
-
- This is also a prerequisite for generation of ORC unwind data, which
- is needed for CONFIG_UNWINDER_ORC.
+ Validate frame pointer rules at compile-time. This helps ensure that
+ runtime stack traces are more reliable.
For more information, see
- tools/objtool/Documentation/stack-validation.txt.
+ tools/objtool/Documentation/objtool.txt.
+
+config NOINSTR_VALIDATION
+ bool
+ depends on HAVE_NOINSTR_VALIDATION && DEBUG_ENTRY
+ select OBJTOOL
+ default y
+
+config VMLINUX_MAP
+ bool "Generate vmlinux.map file when linking"
+ depends on EXPERT
+ help
+ Selecting this option will pass "-Map=vmlinux.map" to ld
+ when linking vmlinux. That file can be useful for verifying
+ and debugging magic section games, and for seeing which
+ pieces of code get eliminated with
+ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
@@ -422,6 +587,8 @@ config DEBUG_FORCE_WEAK_PER_CPU
endmenu # "Compiler options"
+menu "Generic Kernel Debugging Instruments"
+
config MAGIC_SYSRQ
bool "Magic SysRq key"
depends on !UML
@@ -455,20 +622,71 @@ config MAGIC_SYSRQ_SERIAL
This option allows you to decide whether you want to enable the
magic SysRq key.
-config DEBUG_KERNEL
- bool "Kernel debugging"
+config MAGIC_SYSRQ_SERIAL_SEQUENCE
+ string "Char sequence that enables magic SysRq over serial"
+ depends on MAGIC_SYSRQ_SERIAL
+ default ""
help
- Say Y here if you are developing drivers or trying to debug and
- identify kernel problems.
+ Specifies a sequence of characters that can follow BREAK to enable
+ SysRq on a serial console.
-config DEBUG_MISC
- bool "Miscellaneous debug code"
- default DEBUG_KERNEL
- depends on DEBUG_KERNEL
+ If unsure, leave an empty string and the option will not be enabled.
+
+config DEBUG_FS
+ bool "Debug Filesystem"
help
- Say Y here if you need to enable miscellaneous debug code that should
- be under a more specific debug option but isn't.
+ debugfs is a virtual file system that kernel developers use to put
+ debugging files into. Enable this option to be able to read and
+ write to these files.
+
+ For detailed documentation on the debugfs API, see
+ Documentation/filesystems/.
+
+ If unsure, say N.
+
+choice
+ prompt "Debugfs default access"
+ depends on DEBUG_FS
+ default DEBUG_FS_ALLOW_ALL
+ help
+ This selects the default access restrictions for debugfs.
+ It can be overridden with kernel command line option
+ debugfs=[on,no-mount,off]. The restrictions apply for API access
+ and filesystem registration.
+
+config DEBUG_FS_ALLOW_ALL
+ bool "Access normal"
+ help
+ No restrictions apply. Both the API and filesystem registration
+ are enabled. This is the normal default operation.
+
+config DEBUG_FS_DISALLOW_MOUNT
+ bool "Do not register debugfs as filesystem"
+ help
+ The API is open but filesystem is not loaded. Clients can still do
+ their work and read with debug tools that do not need
+ debugfs filesystem.
+
+config DEBUG_FS_ALLOW_NONE
+ bool "No access"
+ help
+ Access is off. Clients get -EPERM when trying to create nodes in
+ the debugfs tree, and debugfs is not registered as a filesystem.
+ Clients can then back off or continue without debugfs access.
+
+endchoice
+
+source "lib/Kconfig.kgdb"
+source "lib/Kconfig.ubsan"
+source "lib/Kconfig.kcsan"
+endmenu
+
+menu "Networking Debugging"
+
+source "net/Kconfig.debug"
+
+endmenu # "Networking Debugging"
menu "Memory Debugging"
@@ -529,139 +747,77 @@ config DEBUG_OBJECTS_PERCPU_COUNTER
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
- range 0 1
- default "1"
- depends on DEBUG_OBJECTS
- help
- Debug objects boot parameter default value
-
-config DEBUG_SLAB
- bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory. This can make kmalloc/kfree-intensive workloads much slower.
-
-config SLUB_DEBUG_ON
- bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG
- default n
- help
- Boot with debugging on by default. SLUB boots by default with
- the runtime debug capabilities switched off. Enabling this is
- equivalent to specifying the "slub_debug" parameter on boot.
- There is no support for more fine grained debug control like
- possible with slub_debug=xxx. SLUB debugging may be switched
- off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
- "slub_debug=-".
-
-config SLUB_STATS
- default n
- bool "Enable SLUB performance statistics"
- depends on SLUB && SYSFS
- help
- SLUB statistics are useful to debug SLUBs allocation behavior in
- order find ways to optimize the allocator. This should never be
- enabled for production use since keeping statistics slows down
- the allocator by a few percentage points. The slabinfo command
- supports the determination of the most active slabs to figure
- out which slabs are relevant to a particular load.
- Try running: slabinfo -DA
-
-config HAVE_DEBUG_KMEMLEAK
- bool
-
-config DEBUG_KMEMLEAK
- bool "Kernel memory leak detector"
- depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK
- select DEBUG_FS
- select STACKTRACE if STACKTRACE_SUPPORT
- select KALLSYMS
- select CRC32
- help
- Say Y here if you want to enable the memory leak
- detector. The memory allocation/freeing is traced in a way
- similar to the Boehm's conservative garbage collector, the
- difference being that the orphan objects are not freed but
- only shown in /sys/kernel/debug/kmemleak. Enabling this
- feature will introduce an overhead to memory
- allocations. See Documentation/dev-tools/kmemleak.rst for more
- details.
-
- Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
- of finding leaks due to the slab objects poisoning.
-
- In order to access the kmemleak file, debugfs needs to be
- mounted (usually at /sys/kernel/debug).
-
-config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
- int "Maximum kmemleak early log entries"
- depends on DEBUG_KMEMLEAK
- range 200 40000
- default 400
- help
- Kmemleak must track all the memory allocations to avoid
- reporting false positives. Since memory may be allocated or
- freed before kmemleak is initialised, an early log buffer is
- used to store these actions. If kmemleak reports "early log
- buffer exceeded", please increase this value.
-
-config DEBUG_KMEMLEAK_TEST
- tristate "Simple test for the kernel memory leak detector"
- depends on DEBUG_KMEMLEAK && m
- help
- This option enables a module that explicitly leaks memory.
-
- If unsure, say N.
-
-config DEBUG_KMEMLEAK_DEFAULT_OFF
- bool "Default kmemleak to off"
- depends on DEBUG_KMEMLEAK
+ range 0 1
+ default "1"
+ depends on DEBUG_OBJECTS
help
- Say Y here to disable kmemleak by default. It can then be enabled
- on the command line via kmemleak=on.
+ Debug objects boot parameter default value
-config DEBUG_KMEMLEAK_AUTO_SCAN
- bool "Enable kmemleak auto scan thread on boot up"
- default y
- depends on DEBUG_KMEMLEAK
+config SHRINKER_DEBUG
+ bool "Enable shrinker debugging support"
+ depends on DEBUG_FS
help
- Depending on the cpu, kmemleak scan may be cpu intensive and can
- stall user tasks at times. This option enables/disables automatic
- kmemleak scan at boot up.
-
- Say N here to disable kmemleak auto scan thread to stop automatic
- scanning. Disabling this option disables automatic reporting of
- memory leaks.
-
- If unsure, say Y.
+ Say Y to enable the shrinker debugfs interface which provides
+ visibility into the kernel memory shrinkers subsystem.
+ Disable it to avoid an extra memory footprint.
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL && !IA64
+ depends on DEBUG_KERNEL
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
+ Also emits a message to dmesg when a process exits if that process
+ used more stack space than previously exiting processes.
This option will slow down process creation somewhat.
+config SCHED_STACK_END_CHECK
+ bool "Detect stack corruption on calls to schedule()"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option checks for a stack overrun on calls to schedule().
+ If the stack end location is found to be overwritten, the kernel
+ always panics, as the content of the corrupted region can no longer
+ be trusted. This ensures that no erroneous behaviour occurs which
+ could result in data corruption or a sporadic crash at a later stage
+ once the region is examined. The runtime overhead introduced is minimal.
+
+config ARCH_HAS_DEBUG_VM_PGTABLE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run DEBUG_VM_PGTABLE.
+
+config DEBUG_VM_IRQSOFF
+ def_bool DEBUG_VM && !PREEMPT_RT
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
help
Enable this to turn on extended checks in the virtual-memory system
- that may impact performance.
+ that may impact performance.
+
+ If unsure, say N.
+
+config DEBUG_VM_SHOOT_LAZIES
+ bool "Debug MMU_LAZY_TLB_SHOOTDOWN implementation"
+ depends on DEBUG_VM
+ depends on MMU_LAZY_TLB_SHOOTDOWN
+ help
+ Enable additional IPIs that ensure lazy tlb mm references are removed
+ before the mm is freed.
If unsure, say N.
-config DEBUG_VM_VMACACHE
- bool "Debug VMA caching"
+config DEBUG_VM_MAPLE_TREE
+ bool "Debug VM maple trees"
depends on DEBUG_VM
+ select DEBUG_MAPLE_TREE
help
- Enable this to turn on VMA caching debug information. Doing so
- can cause significant overhead, so only enable it in non-production
- environments.
+ Enable VM maple tree debugging information and extra validations.
If unsure, say N.
@@ -681,6 +837,22 @@ config DEBUG_VM_PGFLAGS
If unsure, say N.
+config DEBUG_VM_PGTABLE
+ bool "Debug arch page table for semantics compliance"
+ depends on MMU
+ depends on ARCH_HAS_DEBUG_VM_PGTABLE
+ default y if DEBUG_VM
+ help
+ This option provides a debug method which can be used to test
+ architecture page table helper functions on various platforms in
+ verifying if they comply with expected generic MM semantics. This
+ will help architecture code in making sure that any changes or
+ new additions of these helpers still conform to expected
+ semantics of the generic MM. Platforms will have to opt in for
+ this through ARCH_HAS_DEBUG_VM_PGTABLE.
+
+ If unsure, say N.
+
config ARCH_HAS_DEBUG_VIRTUAL
bool
@@ -714,7 +886,7 @@ config DEBUG_MEMORY_INIT
config MEMORY_NOTIFIER_ERROR_INJECT
tristate "Memory hotplug notifier error injection module"
- depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
+ depends on MEMORY_HOTPLUG && NOTIFIER_ERROR_INJECTION
help
This option provides the ability to inject artificial errors to
memory hotplug notifier chain callbacks. It is controlled through
@@ -746,9 +918,31 @@ config DEBUG_PER_CPU_MAPS
Say N if unsure.
+config DEBUG_KMAP_LOCAL
+ bool "Debug kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && KMAP_LOCAL
+ help
+ This option enables additional error checking for the kmap_local
+ infrastructure. Disable for production use.
+
+config ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ bool
+
+config DEBUG_KMAP_LOCAL_FORCE_MAP
+ bool "Enforce kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select KMAP_LOCAL
+ select DEBUG_KMAP_LOCAL
+ help
+ This option enforces temporary mappings through the kmap_local
+ mechanism for non-highmem pages and on non-highmem systems.
+ Disable this for production systems!
+
config DEBUG_HIGHMEM
bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM
+ select DEBUG_KMAP_LOCAL_FORCE_MAP if ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select DEBUG_KMAP_LOCAL
help
This option enables additional error checking for high memory
systems. Disable for production systems.
@@ -759,7 +953,7 @@ config HAVE_DEBUG_STACKOVERFLOW
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW
- ---help---
+ help
Say Y here if you want to check for overflows of kernel, IRQ
and exception stacks (if your architecture uses them). This
option will show detailed messages if free stack space drops
@@ -775,66 +969,49 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
source "lib/Kconfig.kasan"
+source "lib/Kconfig.kfence"
+source "lib/Kconfig.kmsan"
endmenu # "Memory Debugging"
-config ARCH_HAS_KCOV
- bool
+config DEBUG_SHIRQ
+ bool "Debug shared IRQ handlers"
+ depends on DEBUG_KERNEL
help
- An architecture should select this when it can successfully
- build and run with CONFIG_KCOV. This typically requires
- disabling instrumentation for some early boot code.
+ Enable this to generate a spurious interrupt just before a shared
+ interrupt handler is deregistered (generating one when registering
+ is currently disabled). Drivers need to handle this correctly. Some
+ don't and need to be caught.
-config CC_HAS_SANCOV_TRACE_PC
- def_bool $(cc-option,-fsanitize-coverage=trace-pc)
+menu "Debug Oops, Lockups and Hangs"
-config KCOV
- bool "Code coverage for fuzzing"
- depends on ARCH_HAS_KCOV
- depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
- select DEBUG_FS
- select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+config PANIC_ON_OOPS
+ bool "Panic on Oops"
help
- KCOV exposes kernel code coverage information in a form suitable
- for coverage-guided fuzzing (randomized testing).
-
- If RANDOMIZE_BASE is enabled, PC values will not be stable across
- different machines and across reboots. If you need stable PC values,
- disable RANDOMIZE_BASE.
+ Say Y here to enable the kernel to panic when it oopses. This
+ has the same effect as setting oops=panic on the kernel command
+ line.
- For more details, see Documentation/dev-tools/kcov.rst.
+ This feature is useful to ensure that the kernel does not do
+ anything erroneous after an oops which could result in data
+ corruption or other issues.
-config KCOV_ENABLE_COMPARISONS
- bool "Enable comparison operands collection by KCOV"
- depends on KCOV
- depends on $(cc-option,-fsanitize-coverage=trace-cmp)
- help
- KCOV also exposes operands of every comparison in the instrumented
- code along with operand sizes and PCs of the comparison instructions.
- These operands can be used by fuzzing engines to improve the quality
- of fuzzing coverage.
+ Say N if unsure.
-config KCOV_INSTRUMENT_ALL
- bool "Instrument all code by default"
- depends on KCOV
- default y
- help
- If you are doing generic system call fuzzing (like e.g. syzkaller),
- then you will want to instrument the whole kernel and you should
- say y here. If you are doing more targeted fuzzing (like e.g.
- filesystem fuzzing with AFL) then you will want to enable coverage
- for more specific subsets of files, and should say n here.
+config PANIC_ON_OOPS_VALUE
+ int
+ range 0 1
+ default 0 if !PANIC_ON_OOPS
+ default 1 if PANIC_ON_OOPS
-config DEBUG_SHIRQ
- bool "Debug shared IRQ handlers"
- depends on DEBUG_KERNEL
+config PANIC_TIMEOUT
+ int "panic timeout"
+ default 0
help
- Enable this to generate a spurious interrupt as soon as a shared
- interrupt handler is registered, and just before one is deregistered.
- Drivers ought to be able to handle interrupts coming in at those
- points; some don't and need to be caught.
-
-menu "Debug Lockups and Hangs"
+ Set the timeout value (in seconds) until a reboot occurs when
+ the kernel panics. If n = 0, then we wait forever. A timeout
+ value n > 0 will wait n seconds before rebooting, while a timeout
+ value n < 0 will reboot immediately.
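The sign convention above can be summarized in a short sketch (illustrative only, loosely modelled on what panic() does with the configured panic_timeout; not the exact in-tree code):

    /* n is the PANIC_TIMEOUT value (also settable via panic= on the command line). */
    static void handle_panic_timeout(int n)
    {
            if (n > 0)
                    mdelay(n * 1000);       /* busy-wait n seconds ... */
            if (n != 0)
                    emergency_restart();    /* ... then reboot; n < 0 reboots at once */
            /* n == 0: return and leave the system sitting in the panic state */
    }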
config LOCKUP_DETECTOR
bool
@@ -869,35 +1046,30 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
- int
- depends on SOFTLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
- default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
-config HARDLOCKUP_DETECTOR_PERF
+config HAVE_HARDLOCKUP_DETECTOR_BUDDY
bool
- select SOFTLOCKUP_DETECTOR
+ depends on SMP
+ default y
#
-# Enables a timestamp based low pass filter to compensate for perf based
-# hard lockup detection which runs too fast due to turbo modes.
+# Global switch whether to build a hardlockup detector at all. It is available
+# only when the architecture supports at least one implementation. There are
+# two exceptions. The hardlockup detector is never enabled on:
#
-config HARDLOCKUP_CHECK_TIMESTAMP
- bool
-
+# s390: it reported many false positives there
#
-# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
-# lockup detector rather than the perf based detector.
+# sparc64: has a custom implementation which is not using the common
+# hardlockup command line options and sysctl interface.
#
config HARDLOCKUP_DETECTOR
bool "Detect Hard Lockups"
- depends on DEBUG_KERNEL && !S390
- depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
+ depends on DEBUG_KERNEL && !S390 && !HARDLOCKUP_DETECTOR_SPARC64
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH
+ imply HARDLOCKUP_DETECTOR_PERF
+ imply HARDLOCKUP_DETECTOR_BUDDY
+ imply HARDLOCKUP_DETECTOR_ARCH
select LOCKUP_DETECTOR
- select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
- select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
+
help
Say Y here to enable the kernel to act as a watchdog to detect
hard lockups.
@@ -907,6 +1079,63 @@ config HARDLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon detection
and the system will stay locked up.
+#
+# Note that arch-specific variants are always preferred.
+#
+config HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ bool "Prefer the buddy CPU hardlockup detector"
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF && HAVE_HARDLOCKUP_DETECTOR_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ Say Y here to prefer the buddy hardlockup detector over the perf one.
+
+ With the buddy detector, each CPU uses its softlockup hrtimer
+ to check that the next CPU is processing hrtimer interrupts by
+ verifying that a counter is increasing.
+
+ This hardlockup detector is useful on systems that don't have
+ an arch-specific hardlockup detector or if resources needed
+ for the hardlockup detector are better used for other things.
+
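A rough sketch of the buddy scheme described above (an illustrative pseudo-implementation, not the in-tree watchdog; the real detector piggybacks on the existing softlockup hrtimer counters and tolerates several missed updates before reporting):

    static unsigned long heartbeat[NR_CPUS];    /* bumped by each CPU's hrtimer */
    static unsigned long last_seen[NR_CPUS];

    /* Called periodically on CPU 'me' from its softlockup hrtimer. */
    static void buddy_check(int me, int nr_cpus)
    {
            int buddy = (me + 1) % nr_cpus;
            unsigned long now = READ_ONCE(heartbeat[buddy]);

            if (now == last_seen[buddy])
                    pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", buddy);
            last_seen[buddy] = now;
    }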
+config HARDLOCKUP_DETECTOR_PERF
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+
+config HARDLOCKUP_DETECTOR_BUDDY
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+
+config HARDLOCKUP_DETECTOR_ARCH
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ The arch-specific implementation of the hardlockup detector will
+ be used.
+
+#
+# Both the "perf" and "buddy" hardlockup detectors count hrtimer
+# interrupts. This config enables functions managing this common code.
+#
+config HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+ bool
+ select SOFTLOCKUP_DETECTOR
+
+#
+# Enables a timestamp based low pass filter to compensate for perf based
+# hard lockup detection which runs too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+ bool
+
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
@@ -918,13 +1147,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
- int
- depends on HARDLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
- default 1 if BOOTPARAM_HARDLOCKUP_PANIC
-
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
@@ -972,13 +1194,6 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
-config BOOTPARAM_HUNG_TASK_PANIC_VALUE
- int
- depends on DETECT_HUNG_TASK
- range 0 1
- default 0 if !BOOTPARAM_HUNG_TASK_PANIC
- default 1 if BOOTPARAM_HUNG_TASK_PANIC
-
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL
@@ -990,42 +1205,42 @@ config WQ_WATCHDOG
state. This can be configured through kernel parameter
"workqueue.watchdog_thresh" and its sysfs counterpart.
-endmenu # "Debug lockups and hangs"
-
-config PANIC_ON_OOPS
- bool "Panic on Oops"
+config WQ_CPU_INTENSIVE_REPORT
+ bool "Report per-cpu work items which hog CPU for too long"
+ depends on DEBUG_KERNEL
help
- Say Y here to enable the kernel to panic when it oopses. This
- has the same effect as setting oops=panic on the kernel command
- line.
+ Say Y here to enable reporting of concurrency-managed per-cpu work
+ items that hog CPUs for longer than
+ workqueue.cpu_intensive_thresh_us. Workqueue automatically
+ detects and excludes them from concurrency management to prevent
+	  them from stalling other per-cpu work items. Occasional
+ triggering may not necessarily indicate a problem. Repeated
+ triggering likely indicates that the work item should be switched
+ to use an unbound workqueue.
+
+config TEST_LOCKUP
+ tristate "Test module to generate lockups"
+ depends on m
+ help
+ This builds the "test_lockup" module that helps to make sure
+ that watchdogs and lockup detectors are working properly.
- This feature is useful to ensure that the kernel does not do
- anything erroneous after an oops which could result in data
- corruption or other issues.
+	  Depending on module parameters it can emulate a soft or hard
+	  lockup, a "hung task", or hold an arbitrary lock for a long time.
+	  It can also generate a series of lockups with cooling-down periods.
- Say N if unsure.
+ If unsure, say N.
-config PANIC_ON_OOPS_VALUE
- int
- range 0 1
- default 0 if !PANIC_ON_OOPS
- default 1 if PANIC_ON_OOPS
+endmenu # "Debug lockups and hangs"
-config PANIC_TIMEOUT
- int "panic timeout"
- default 0
- help
- Set the timeout value (in seconds) until a reboot occurs when the
- the kernel panics. If n = 0, then we wait forever. A timeout
- value n > 0 will wait n seconds before rebooting, while a timeout
- value n < 0 will reboot immediately.
+menu "Scheduler Debugging"
config SCHED_DEBUG
bool "Collect scheduler debugging info"
- depends on DEBUG_KERNEL && PROC_FS
+ depends on DEBUG_KERNEL && DEBUG_FS
default y
help
- If you say Y here, the /proc/sched_debug file will be provided
+ If you say Y here, the /sys/kernel/debug/sched file will be provided
that can help debug the scheduler. The runtime overhead of this
option is minimal.
@@ -1046,17 +1261,7 @@ config SCHEDSTATS
application, you can say N to avoid the very slight overhead
this adds.
-config SCHED_STACK_END_CHECK
- bool "Detect stack corruption on calls to schedule()"
- depends on DEBUG_KERNEL
- default n
- help
- This option checks for a stack overrun on calls to schedule().
- If the stack end location is found to be over written always panic as
- the content of the corrupted region can no longer be trusted.
- This is to ensure no erroneous behaviour occurs which could result in
- data corruption or a sporadic crash at a later stage once the region
- is examined. The runtime overhead introduced is minimal.
+endmenu
config DEBUG_TIMEKEEPING
bool "Enable extra timekeeping sanity checking"
@@ -1073,14 +1278,17 @@ config DEBUG_TIMEKEEPING
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
- depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
- default y
+ depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
help
If you say Y here then the kernel will use a debug variant of the
commonly used smp_processor_id() function and will print warnings
if kernel code uses it in a preemption-unsafe way. Also, the kernel
will detect preemption count underflows.
+	  This option has the potential to introduce high runtime overhead,
+	  depending on the workload, as it triggers debugging routines for each
+	  this_cpu operation. It should only be used for debugging purposes.
+
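For illustration, the debug variant of smp_processor_id() boils down to a check along these lines (simplified sketch; the real helper also tolerates contexts that are pinned to a single CPU or have interrupts disabled):

    static unsigned int smp_processor_id_checked(void)
    {
            /* Reading the CPU number while preemptible is racy: the task may
             * be migrated to another CPU immediately afterwards. */
            WARN_ONCE(preemptible() && num_online_cpus() > 1,
                      "using smp_processor_id() in preemptible context\n");
            return raw_smp_processor_id();
    }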
menu "Lock Debugging (spinlocks, mutexes, etc...)"
config LOCK_DEBUGGING_SUPPORT
@@ -1093,11 +1301,12 @@ config PROVE_LOCKING
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
- select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+ select DEBUG_RWSEMS
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
+ select PREEMPT_COUNT if !ARCH_NO_PREEMPT
select TRACE_IRQFLAGS
default n
help
@@ -1132,21 +1341,38 @@ config PROVE_LOCKING
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
- For more details, see Documentation/locking/lockdep-design.txt.
+ For more details, see Documentation/locking/lockdep-design.rst.
+
+config PROVE_RAW_LOCK_NESTING
+ bool "Enable raw_spinlock - spinlock nesting checks"
+ depends on PROVE_LOCKING
+ default n
+ help
+ Enable the raw_spinlock vs. spinlock nesting checks which ensure
+ that the lock nesting rules for PREEMPT_RT enabled kernels are
+ not violated.
+
+	  NOTE: There are known nesting problems, so if you enable this
+	  option, expect lockdep splats until these problems have been fully
+	  addressed (which is work in progress). This config switch makes it
+	  possible to identify and analyze these problems. It will be removed,
+	  and the check permanently enabled, once the main issues have been fixed.
+
+ If unsure, select N.
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
default n
help
This feature enables tracking lock contention points
- For more details, see Documentation/locking/lockstat.txt
+ For more details, see Documentation/locking/lockstat.rst
This also enables lock events required by "perf lock",
subcommand of perf.
@@ -1175,7 +1401,7 @@ config DEBUG_SPINLOCK
config DEBUG_MUTEXES
bool "Mutex debugging: basic checks"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT
help
This feature allows mutex semantics violations to be detected and
reported.
@@ -1185,7 +1411,8 @@ config DEBUG_WW_MUTEX_SLOWPATH
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select DEBUG_LOCK_ALLOC
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if PREEMPT_RT
help
This feature enables slowpath testing for w/w mutex users by
injecting additional -EDEADLK wound/backoff cases. Together with
@@ -1199,16 +1426,16 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
- depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+ depends on DEBUG_KERNEL
help
- This debugging feature allows mismatched rw semaphore locks and unlocks
- to be detected and reported.
+ This debugging feature allows mismatched rw semaphore locks
+ and unlocks to be detected and reported.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
select LOCKDEP
help
@@ -1223,16 +1450,56 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
config LOCKDEP_SMALL
bool
+config LOCKDEP_BITS
+ int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 15
+ help
+	  Try increasing this value if you hit the "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+
+config LOCKDEP_CHAINS_BITS
+ int "Bitsize for MAX_LOCKDEP_CHAINS"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 16
+ help
+	  Try increasing this value if you hit the "BUG: MAX_LOCKDEP_CHAINS too low!" message.
+
+config LOCKDEP_STACK_TRACE_BITS
+ int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 19
+ help
+	  Try increasing this value if you hit the "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+
+config LOCKDEP_STACK_TRACE_HASH_BITS
+ int "Bitsize for STACK_TRACE_HASH_SIZE"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 14
+ help
+	  Try increasing this value if you need a large STACK_TRACE_HASH_SIZE.
+
+config LOCKDEP_CIRCULAR_QUEUE_BITS
+ int "Bitsize for elements in circular_queue struct"
+ depends on LOCKDEP
+ range 10 30
+ default 12
+ help
+	  Try increasing this value if you hit the "lockdep bfs error:-1" warning due to an __cq_enqueue() failure.
+
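These options are bit widths rather than absolute counts; the corresponding table sizes are derived as powers of two, roughly as sketched below (the macro names mirror the messages quoted above; the exact derivation lives in the lockdep internals):

    /* With the defaults above: 1 << 15 = 32768 dependency entries,
     * 1 << 16 = 65536 lock chains, 1 << 19 stack-trace entries, and a
     * stack-trace hash table with 1 << 14 buckets. */
    #define MAX_LOCKDEP_ENTRIES     (1UL << CONFIG_LOCKDEP_BITS)
    #define MAX_LOCKDEP_CHAINS      (1UL << CONFIG_LOCKDEP_CHAINS_BITS)
    #define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
    #define STACK_TRACE_HASH_SIZE   (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)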
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
+ select DEBUG_IRQFLAGS
help
If you say Y here, the lock dependency engine will do
additional runtime checks to debug itself, at the price
@@ -1256,7 +1523,7 @@ config DEBUG_LOCKING_API_SELFTESTS
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
are detected by debugging mechanisms or not. (if you disable
- lock debugging then those bugs wont be detected of course.)
+ lock debugging then those bugs won't be detected of course.)
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
@@ -1286,14 +1553,68 @@ config WW_MUTEX_SELFTEST
Say M if you want these self tests to build as a module.
Say N if you are unsure.
+config SCF_TORTURE_TEST
+ tristate "torture tests for smp_call_function*()"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ help
+ This option provides a kernel module that runs torture tests
+ on the smp_call_function() family of primitives. The kernel
+ module may be built after the fact on the running kernel to
+ be tested, if desired.
+
+config CSD_LOCK_WAIT_DEBUG
+ bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
+ depends on DEBUG_KERNEL
+ depends on 64BIT
+ default n
+ help
+ This option enables debug prints when CPUs are slow to respond
+ to the smp_call_function*() IPI wrappers. These debug prints
+ include the IPI handler function currently executing (if any)
+ and relevant stack traces.
+
+config CSD_LOCK_WAIT_DEBUG_DEFAULT
+ bool "Default csd_lock_wait() debugging on at boot time"
+ depends on CSD_LOCK_WAIT_DEBUG
+ depends on 64BIT
+ default n
+ help
+ This option causes the csdlock_debug= kernel boot parameter to
+ default to 1 (basic debugging) instead of 0 (no debugging).
+
endmenu # lock debugging
config TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_SUPPORT
bool
help
Enables hooks to interrupt enabling and disabling for
either tracing or lock debugging.
+config TRACE_IRQFLAGS_NMI
+ def_bool y
+ depends on TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_NMI_SUPPORT
+
+config NMI_CHECK_CPU
+ bool "Debugging for CPUs failing to respond to backtrace requests"
+ depends on DEBUG_KERNEL
+ depends on X86
+ default n
+ help
+ Enables debug prints when a CPU fails to respond to a given
+ backtrace NMI. These prints provide some reasons why a CPU
+ might legitimately be failing to respond, for example, if it
+	  is offline or if ignore_nmis is set.
+
+config DEBUG_IRQFLAGS
+ bool "Debug IRQ flag manipulation"
+ help
+ Enables checks for potentially unsafe enabling or disabling of
+ interrupts, such as calling raw_local_irq_restore() when interrupts
+ are enabled.
+
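A minimal sketch of the class of check this enables (illustrative only; the helper name is made up and the in-tree hooks are wired into the irqflags macros themselves):

    static inline void checked_irq_restore(unsigned long flags)
    {
            /* Restoring saved flags while interrupts are already enabled
             * usually means the save/restore pairs are unbalanced. */
            WARN_ON_ONCE(!irqs_disabled());
            raw_local_irq_restore(flags);
    }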
config STACKTRACE
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
@@ -1322,8 +1643,7 @@ config WARN_ALL_UNSEEDED_RANDOM
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
However, since users cannot do anything actionable to
- address this, by default the kernel will issue only a single
- warning for the first use of unseeded randomness.
+ address this, by default this option is disabled.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
@@ -1344,7 +1664,7 @@ config DEBUG_KOBJECT_RELEASE
help
kobjects are reference counted objects. This means that their
last reference count put is not predictable, and the kobject can
- live on past the point at which a driver decides to drop it's
+ live on past the point at which a driver decides to drop its
initial reference to the kobject gained on allocation. An
example of this would be a struct device which has just been
unregistered.
@@ -1360,21 +1680,19 @@ config DEBUG_KOBJECT_RELEASE
config HAVE_DEBUG_BUGVERBOSE
bool
-config DEBUG_BUGVERBOSE
- bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
- depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
- default y
- help
- Say Y here to make BUG() panics output the file name and line number
- of the BUG call as well as the EIP and oops trace. This aids
- debugging but costs about 70-100K of memory.
+menu "Debug kernel data structures"
config DEBUG_LIST
bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
+ depends on DEBUG_KERNEL
+ select LIST_HARDENED
help
- Enable this to turn on extended checks in the linked-list
- walking routines.
+ Enable this to turn on extended checks in the linked-list walking
+ routines.
+
+ This option trades better quality error reports for performance, and
+ is more suitable for kernel debugging. If you care about performance,
+ you should only enable CONFIG_LIST_HARDENED instead.
If unsure, say N.
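For illustration, the extended checks amount to validating neighbouring pointers before the list is modified, along the lines of this hedged sketch (not the exact in-tree helpers):

    /* Check that 'prev' and 'next' still point at each other before inserting
     * 'entry' between them; a mismatch typically means a use-after-free or a
     * double list_add of the same node. */
    static bool list_add_looks_valid(struct list_head *entry,
                                     struct list_head *prev,
                                     struct list_head *next)
    {
            if (next->prev != prev || prev->next != next ||
                entry == prev || entry == next) {
                    WARN(1, "list_add corruption: prev=%px next=%px entry=%px\n",
                         prev, next, entry);
                    return false;
            }
            return true;
    }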
@@ -1408,21 +1726,25 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
-config DEBUG_CREDENTIALS
- bool "Debug credential management"
- depends on DEBUG_KERNEL
+config DEBUG_CLOSURES
+ bool "Debug closures (bcache async widgits)"
+ depends on CLOSURES
+ select DEBUG_FS
help
- Enable this to turn on some debug checking for credential
- management. The additional code keeps track of the number of
- pointers from task_structs to any given cred struct, and checks to
- see that this number never exceeds the usage count of the cred
- struct.
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
- Furthermore, if SELinux is enabled, this also checks that the
- security pointer in the cred struct is never seen to be invalid.
+config DEBUG_MAPLE_TREE
+ bool "Debug maple trees"
+ depends on DEBUG_KERNEL
+ help
+ Enable maple tree debugging information and extra validations.
If unsure, say N.
+endmenu
+
source "kernel/rcu/Kconfig.debug"
config DEBUG_WQ_FORCE_RR_CPU
@@ -1440,33 +1762,6 @@ config DEBUG_WQ_FORCE_RR_CPU
feature by default. When enabled, memory and cache locality will
be impacted.
-config DEBUG_BLOCK_EXT_DEVT
- bool "Force extended block device numbers and spread them"
- depends on DEBUG_KERNEL
- depends on BLOCK
- default n
- help
- BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
- SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
- YOU ARE DOING. Distros, please enable this and fix whatever
- is broken.
-
- Conventionally, block device numbers are allocated from
- predetermined contiguous area. However, extended block area
- may introduce non-contiguous block device numbers. This
- option forces most block device numbers to be allocated from
- the extended space and spreads them to discover kernel or
- userland code paths which assume predetermined contiguous
- device number allocation.
-
- Note that turning on this debug option shuffles all the
- device numbers for all IDE and SCSI devices including libata
- ones, so root partition specified using device number
- directly (via rdev or root=MAJ:MIN) won't work anymore.
- Textual device names (root=/dev/sdXn) will continue to work.
-
- Say N if you are unsure.
-
config CPU_HOTPLUG_STATE_CONTROL
bool "Enable CPU hotplug state control"
depends on DEBUG_KERNEL
@@ -1480,6 +1775,112 @@ config CPU_HOTPLUG_STATE_CONTROL
	  Say N if you are unsure.
+config LATENCYTOP
+ bool "Latency measuring infrastructure"
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+ select STACKTRACE
+ select SCHEDSTATS
+ help
+ Enable this option if you want to use the LatencyTOP tool
+ to find out which userspace is blocking on what kernel operations.
+
+config DEBUG_CGROUP_REF
+ bool "Disable inlining of cgroup css reference count functions"
+ depends on DEBUG_KERNEL
+ depends on CGROUPS
+ depends on KPROBES
+ default n
+ help
+ Force cgroup css reference count functions to not be inlined so
+ that they can be kprobed for debugging.
+
+source "kernel/trace/Kconfig"
+
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/core-api/debugging-via-ohci1394.rst for more information.
+
+source "samples/Kconfig"
+
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+ bool
+
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU && DEVMEM
+ depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
+ default y if PPC || X86 || ARM64
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel. Note that with PAT support
+ enabled, even in this case there are restrictions on /dev/mem
+ use due to the cache aliasing requirements.
+
+ If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+ file only allows userspace access to PCI space and the BIOS code and
+ data regions. This is sufficient for dosemu and X and all common
+ users of /dev/mem.
+
+ If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+ bool "Filter I/O access to /dev/mem"
+ depends on STRICT_DEVMEM
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ io-memory regardless of whether a driver is actively using that
+ range. Accidental access to this is obviously disastrous, but
+ specific access can be used by people debugging kernel drivers.
+
+ If this option is switched on, the /dev/mem file only allows
+	  userspace access to *idle* io-memory ranges (see /proc/iomem). This
+ may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+ if the driver using a given range cannot be disabled.
+
+ If in doubt, say Y.
+
+menu "$(SRCARCH) Debugging"
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu
+
+menu "Kernel Testing and Coverage"
+
+source "lib/kunit/Kconfig"
+
config NOTIFIER_ERROR_INJECTION
tristate "Notifier error injection"
depends on DEBUG_KERNEL
@@ -1556,8 +1957,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
config FUNCTION_ERROR_INJECTION
- def_bool y
+ bool "Fault-injections of functions"
depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful to test error paths of code.
+
+	  If unsure, say N.
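As a hedged illustration of the annotation mentioned above (the function and its body are invented; the ALLOW_ERROR_INJECTION() macro and the ERRNO class come from <linux/error-injection.h>):

    #include <linux/error-injection.h>

    /* Hypothetical helper whose error path we want to exercise. With this
     * option enabled, its return value can be overridden (for example forced
     * to -EINVAL) through the error-injection framework or from BPF. */
    static noinline int example_prepare_resource(void)
    {
            return 0;       /* normal success path */
    }
    ALLOW_ERROR_INJECTION(example_prepare_resource, ERRNO);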
config FAULT_INJECTION
bool "Fault-injection framework"
@@ -1569,16 +1976,22 @@ config FAULT_INJECTION
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
- depends on SLAB || SLUB
help
Provide fault-injection capability for kmalloc.
config FAIL_PAGE_ALLOC
- bool "Fault-injection capabilitiy for alloc_pages()"
+ bool "Fault-injection capability for alloc_pages()"
depends on FAULT_INJECTION
help
Provide fault-injection capability for alloc_pages().
+config FAULT_INJECTION_USERCOPY
+ bool "Fault injection capability for usercopy functions"
+ depends on FAULT_INJECTION
+ help
+ Provides fault-injection capability to inject failures
+ in usercopy functions (copy_from_user(), get_user(), ...).
+
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION && BLOCK
@@ -1594,7 +2007,7 @@ config FAIL_IO_TIMEOUT
thus exercising the error handling.
Only works with drivers that use the generic timeout handling,
- for others it wont do anything.
+ for others it won't do anything.
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
@@ -1629,60 +2042,88 @@ config FAIL_MMC_REQUEST
and to test how the mmc host driver handles retries from
the block device.
+config FAIL_SUNRPC
+ bool "Fault-injection capability for SunRPC"
+ depends on FAULT_INJECTION_DEBUG_FS && SUNRPC_DEBUG
+ help
+ Provide fault-injection capability for SunRPC and
+ its consumers.
+
+config FAULT_INJECTION_CONFIGFS
+ bool "Configfs interface for fault-injection capabilities"
+ depends on FAULT_INJECTION
+ select CONFIGFS_FS
+ help
+ This option allows configfs-based drivers to dynamically configure
+ fault-injection via configfs. Each parameter for driver-specific
+ fault-injection can be made visible as a configfs attribute in a
+ configfs group.
+
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
- depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
- depends on !X86_64
+ depends on FAULT_INJECTION
+ depends on (FAULT_INJECTION_DEBUG_FS || FAULT_INJECTION_CONFIGFS) && STACKTRACE_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
help
Provide stacktrace filter for fault-injection capabilities
-config LATENCYTOP
- bool "Latency measuring infrastructure"
- depends on DEBUG_KERNEL
- depends on STACKTRACE_SUPPORT
- depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
- select KALLSYMS
- select KALLSYMS_ALL
- select STACKTRACE
- select SCHEDSTATS
- select SCHED_DEBUG
+config ARCH_HAS_KCOV
+ bool
help
- Enable this option if you want to use the LatencyTOP tool
- to find out which userspace is blocking on what kernel operations.
-
-source "kernel/trace/Kconfig"
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
-config PROVIDE_OHCI1394_DMA_INIT
- bool "Remote debugging over FireWire early on boot"
- depends on PCI && X86
- help
- If you want to debug problems which hang or crash the kernel early
- on boot and the crashing machine has a FireWire port, you can use
- this feature to remotely access the memory of the crashed machine
- over FireWire. This employs remote DMA as part of the OHCI1394
- specification which is now the standard for FireWire controllers.
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool $(cc-option,-fsanitize-coverage=trace-pc)
- With remote DMA, you can monitor the printk buffer remotely using
- firescope and access all memory below 4GB using fireproxy from gdb.
- Even controlling a kernel debugger is possible using remote DMA.
- Usage:
+config KCOV
+ bool "Code coverage for fuzzing"
+ depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ select DEBUG_FS
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+ select OBJTOOL if HAVE_NOINSTR_HACK
+ help
+ KCOV exposes kernel code coverage information in a form suitable
+ for coverage-guided fuzzing (randomized testing).
- If ohci1394_dma=early is used as boot parameter, it will initialize
- all OHCI1394 controllers which are found in the PCI config space.
+ For more details, see Documentation/dev-tools/kcov.rst.
- As all changes to the FireWire bus such as enabling and disabling
- devices cause a bus reset and thereby disable remote DMA for all
- devices, be sure to have the cable plugged and FireWire enabled on
- the debugging host before booting the debug target for debugging.
+config KCOV_ENABLE_COMPARISONS
+ bool "Enable comparison operands collection by KCOV"
+ depends on KCOV
+ depends on $(cc-option,-fsanitize-coverage=trace-cmp)
+ help
+ KCOV also exposes operands of every comparison in the instrumented
+ code along with operand sizes and PCs of the comparison instructions.
+ These operands can be used by fuzzing engines to improve the quality
+ of fuzzing coverage.
- This code (~1k) is freed after boot. By then, the firewire stack
- in charge of the OHCI-1394 controllers should be used instead.
+config KCOV_INSTRUMENT_ALL
+ bool "Instrument all code by default"
+ depends on KCOV
+ default y
+ help
+ If you are doing generic system call fuzzing (like e.g. syzkaller),
+ then you will want to instrument the whole kernel and you should
+ say y here. If you are doing more targeted fuzzing (like e.g.
+ filesystem fuzzing with AFL) then you will want to enable coverage
+ for more specific subsets of files, and should say n here.
- See Documentation/debugging-via-ohci1394.txt for more information.
+config KCOV_IRQ_AREA_SIZE
+ hex "Size of interrupt coverage collection area in words"
+ depends on KCOV
+ default 0x40000
+ help
+ KCOV uses preallocated per-cpu areas to collect coverage from
+ soft interrupts. This specifies the size of those areas in the
+ number of unsigned long words.
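A condensed userspace sketch of how the coverage described for KCOV above is typically consumed (adapted from the pattern documented in Documentation/dev-tools/kcov.rst; assumes debugfs is mounted and omits error handling):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kcov.h>

    #define COVER_SIZE (64 << 10)   /* size of the coverage buffer, in words */

    int main(void)
    {
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);
            unsigned long *cover, n, i;

            ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
            cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);  /* collect PCs for this task */
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

            read(-1, NULL, 0);                      /* the syscall under test */

            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            ioctl(fd, KCOV_DISABLE, 0);
            close(fd);
            return 0;
    }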
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
@@ -1690,6 +2131,41 @@ menuconfig RUNTIME_TESTING_MENU
if RUNTIME_TESTING_MENU
+config TEST_DHRY
+ tristate "Dhrystone benchmark test"
+ help
+ Enable this to include the Dhrystone 2.1 benchmark. This test
+ calculates the number of Dhrystones per second, and the number of
+ DMIPS (Dhrystone MIPS) obtained when the Dhrystone score is divided
+ by 1757 (the number of Dhrystones per second obtained on the VAX
+ 11/780, nominally a 1 MIPS machine).
+
+ To run the benchmark, it needs to be enabled explicitly, either from
+ the kernel command line (when built-in), or from userspace (when
+	  built-in or modular).
+
+ Run once during kernel boot:
+
+ test_dhry.run
+
+ Set number of iterations from kernel command line:
+
+ test_dhry.iterations=<n>
+
+ Set number of iterations from userspace:
+
+ echo <n> > /sys/module/test_dhry/parameters/iterations
+
+ Trigger manual run from userspace:
+
+ echo y > /sys/module/test_dhry/parameters/run
+
+ If the number of iterations is <= 0, the test will devise a suitable
+ number of iterations (test runs for at least 2s) automatically.
+ This process takes ca. 4s.
+
+ If unsure, say N.
+
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
@@ -1701,11 +2177,24 @@ config LKDTM
called lkdtm.
Documentation on how to use the module can be found in
- Documentation/fault-injection/provoke-crashes.txt
+ Documentation/fault-injection/provoke-crashes.rst
+
+config CPUMASK_KUNIT_TEST
+ tristate "KUnit test for cpumask" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable to turn on cpumask tests, running at boot or module load time.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
config TEST_LIST_SORT
- tristate "Linked list sorting test"
- depends on DEBUG_KERNEL || m
+ tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this to turn on 'list_sort()' function test. This test is
executed only once during system boot (so affects only boot time),
@@ -1713,19 +2202,55 @@ config TEST_LIST_SORT
If unsure, say N.
-config TEST_SORT
- tristate "Array-based sort test"
+config TEST_MIN_HEAP
+ tristate "Min heap test"
depends on DEBUG_KERNEL || m
help
+ Enable this to turn on min heap function tests. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_SORT
+ tristate "Array-based sort test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
This option enables the self-test function of 'sort()' at boot,
or at module load time.
If unsure, say N.
+config TEST_DIV64
+ tristate "64bit/32bit division and modulo test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on 'do_div()' function test. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_IOV_ITER
+ tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on MMU
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on testing of the operation of the I/O iterator
+ (iov_iter). This test is executed only once during system boot (so
+ affects only boot time), or at module load time.
+
+ If unsure, say N.
+
config KPROBES_SANITY_TEST
- bool "Kprobes sanity tests"
+ tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS
depends on DEBUG_KERNEL
depends on KPROBES
+ depends on KUNIT
+ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
boot. Samples of kprobe and kretprobe are inserted and
@@ -1733,6 +2258,18 @@ config KPROBES_SANITY_TEST
Say N if you are unsure.
+config FPROBE_SANITY_TEST
+ bool "Self test for fprobe"
+ depends on DEBUG_KERNEL
+ depends on FPROBE
+ depends on KUNIT=y
+ help
+	  This option enables testing of the fprobe when the system boots.
+	  A series of tests is run to verify that the fprobe is functioning
+ properly.
+
+ Say N if you are unsure.
+
config BACKTRACE_SELF_TEST
tristate "Self test for the backtrace code"
depends on DEBUG_KERNEL
@@ -1747,6 +2284,16 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
+config TEST_REF_TRACKER
+ tristate "Self test for reference tracker"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select REF_TRACKER
+ help
+ This option provides a kernel module performing tests
+ using reference tracker infrastructure.
+
+ Say N if you are unsure.
+
config RBTREE_TEST
tristate "Red-Black tree test"
depends on DEBUG_KERNEL
@@ -1754,6 +2301,18 @@ config RBTREE_TEST
A benchmark measuring the performance of the rbtree library.
Also includes rbtree invariant checks.
+config REED_SOLOMON_TEST
+ tristate "Reed-Solomon library test"
+ depends on DEBUG_KERNEL || m
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC16
+ select REED_SOLOMON_DEC16
+ help
+ This option enables the self-test function of rslib at boot,
+ or at module load time.
+
+ If unsure, say N.
+
config INTERVAL_TREE_TEST
tristate "Interval tree test"
depends on DEBUG_KERNEL
@@ -1782,7 +2341,7 @@ config ASYNC_RAID6_TEST
tristate "Self test for hardware accelerated raid6 recovery"
depends on ASYNC_RAID6_RECOV
select ASYNC_MEMCPY
- ---help---
+ help
This is a one-shot self test that permutes through the
recovery of all the possible two disk failure scenarios for a
N-disk array. Recovery is performed with the asynchronous
@@ -1794,18 +2353,21 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config STRING_SELFTEST
+ tristate "Test string functions at runtime"
+
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
-config TEST_STRSCPY
- tristate "Test strscpy*() family of functions at runtime"
-
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
config TEST_PRINTF
tristate "Test printf() family of functions at runtime"
+config TEST_SCANF
+ tristate "Test scanf() family of functions at runtime"
+
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -1813,21 +2375,20 @@ config TEST_BITMAP
If unsure, say N.
-config TEST_BITFIELD
- tristate "Test bitfield functions at runtime"
- help
- Enable this option to test the bitfield functions at boot.
-
- If unsure, say N.
-
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
config TEST_XARRAY
tristate "Test the XArray code at runtime"
-config TEST_OVERFLOW
- tristate "Test check_*_overflow() functions at runtime"
+config TEST_MAPLE_TREE
+ tristate "Test the Maple Tree code at runtime or module load"
+ help
+ Enable this option to test the maple tree code functions at boot, or
+	  when the module is loaded. Enabling "Debug Maple Trees" will provide
+ more verbose output on failures.
+
+ If unsure, say N.
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
@@ -1836,16 +2397,6 @@ config TEST_RHASHTABLE
If unsure, say N.
-config TEST_HASH
- tristate "Perform selftest on hash functions"
- help
- Enable this option to test the kernel's integer (<linux/hash.h>),
- string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
- hash functions on boot (or module load).
-
- This is intended to help people writing architecture-specific
- optimized versions. If unsure, say N.
-
config TEST_IDA
tristate "Perform selftest on IDA functions"
@@ -1858,6 +2409,14 @@ config TEST_PARMAN
If unsure, say N.
+config TEST_IRQ_TIMINGS
+ bool "IRQ timings selftest"
+ depends on IRQ_TIMINGS
+ help
+ Enable this option to test the irq timings code on boot.
+
+ If unsure, say N.
+
config TEST_LKM
tristate "Test module loading with 'hello world' module"
depends on m
@@ -1871,6 +2430,19 @@ config TEST_LKM
If unsure, say N.
+config TEST_BITOPS
+ tristate "Test module for compilation of bitops operations"
+ depends on m
+ help
+ This builds the "test_bitops" module that is much like the
+ TEST_LKM module except that it does a basic exercise of the
+ set/clear_bit macros and get_count_order/long to make sure there are
+	  no compiler warnings from the C=1 sparse checker or -Wextra
+	  compilations. It has no dependencies and doesn't run or load unless
+	  explicitly requested by name, for example: modprobe test_bitops.
+
+ If unsure, say N.
+
config TEST_VMALLOC
tristate "Test module for stress/performance analysis of vmalloc allocator"
default n
@@ -1909,6 +2481,15 @@ config TEST_BPF
If unsure, say N.
+config TEST_BLACKHOLE_DEV
+ tristate "Test blackhole netdev functionality"
+ depends on m && NET
+ help
+ This builds the "test_blackhole_dev" module that validates the
+ data path through this blackhole netdev.
+
+ If unsure, say N.
+
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
help
@@ -1939,6 +2520,273 @@ config TEST_SYSCTL
If unsure, say N.
+config BITFIELD_KUNIT
+ tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config CHECKSUM_KUNIT
+ tristate "KUnit test checksum functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the checksum functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config HASH_KUNIT_TEST
+ tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  Enable this option to test the kernel's string (<linux/stringhash.h>) and
+ integer (<linux/hash.h>) hash functions on boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config RESOURCE_KUNIT_TEST
+ tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the resource API unit test.
+	  Tests the logic of the API provided by resource.c and ioport.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config SYSCTL_KUNIT_TEST
+ tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the proc sysctl unit test, which runs on boot.
+ Tests the API contract and implementation correctness of sysctl.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config LIST_KUNIT_TEST
+ tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the linked list KUnit test suite.
+	  It tests the API and basic functionality of the list_head type
+ and associated macros.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config HASHTABLE_KUNIT_TEST
+ tristate "KUnit Test for Kernel Hashtable structures" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the hashtable KUnit test suite.
+ It tests the basic functionality of the API defined in
+ include/linux/hashtable.h. For more information on KUnit and
+ unit tests in general please refer to the KUnit documentation
+ in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config LINEAR_RANGES_TEST
+ tristate "KUnit test for linear_ranges"
+ depends on KUNIT
+ select LINEAR_RANGES
+ help
+ This builds the linear_ranges unit test, which runs on boot.
+	  Tests the correctness of the linear_ranges logic.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config CMDLINE_KUNIT_TEST
+ tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the cmdline API unit test.
+	  Tests the logic of the API provided by cmdline.c.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config BITS_TEST
+ tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the bits unit test.
+ Tests the logic of macros defined in bits.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config SLUB_KUNIT_TEST
+ tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+ depends on SLUB_DEBUG && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds SLUB allocator unit test.
+ Tests SLUB cache debugging functionality.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config RATIONAL_KUNIT_TEST
+ tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT && RATIONAL
+ default KUNIT_ALL_TESTS
+ help
+ This builds the rational math unit test.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config MEMCPY_KUNIT_TEST
+ tristate "Test memcpy(), memmove(), and memset() functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for memcpy(), memmove(), and memset() functions.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config MEMCPY_SLOW_KUNIT_TEST
+ bool "Include exhaustive memcpy tests"
+ depends on MEMCPY_KUNIT_TEST
+ default y
+ help
+ Some memcpy tests are quite exhaustive in checking for overlaps
+ and bit ranges. These can be very slow, so they are split out
+ as a separate config, in case they need to be disabled.
+
+ Note this config option will be replaced by the use of KUnit test
+ attributes.
+
+config IS_SIGNED_TYPE_KUNIT_TEST
+ tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the is_signed_type() macro.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config OVERFLOW_KUNIT_TEST
+ tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the check_*_overflow(), size_*(), allocation, and
+ related functions.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config STACKINIT_KUNIT_TEST
+ tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Test if the kernel is zero-initializing stack variables and
+ padding. Coverage is controlled by compiler flags,
+ CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
+ CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
+ or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+
+config FORTIFY_KUNIT_TEST
+ tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT && FORTIFY_SOURCE
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for checking internals of FORTIFY_SOURCE as used
+ by the str*() and mem*() family of functions. For testing runtime
+ traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+
+config HW_BREAKPOINT_KUNIT_TEST
+ bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS
+ depends on HAVE_HW_BREAKPOINT
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ Tests for hw_breakpoint constraints accounting.
+
+ If unsure, say N.
+
+config STRCAT_KUNIT_TEST
+ tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config STRSCPY_KUNIT_TEST
+ tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config SIPHASH_KUNIT_TEST
+ tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
+ functions on boot (or module load).
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -1955,11 +2803,22 @@ config TEST_STATIC_KEYS
If unsure, say N.
+config TEST_DYNAMIC_DEBUG
+ tristate "Test DYNAMIC_DEBUG"
+ depends on DYNAMIC_DEBUG
+ help
+ This module registers a tracer callback to count enabled
+ pr_debugs in a 'do_debugging' function, then alters their
+ enablements, calls the function, and compares counts.
+
+ If unsure, say N.
+
config TEST_KMOD
tristate "kmod stress tester"
depends on m
depends on NETDEVICES && NET_CORE && INET # for TUN
depends on BLOCK
+ depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
select TEST_LKM
select XFS_FS
select TUN
@@ -2029,85 +2888,162 @@ config TEST_OBJAGG
Enable this option to test object aggregation manager on boot
(or module load).
+config TEST_MEMINIT
+ tristate "Test heap/page initialization"
+ help
+ Test if the kernel is zero-initializing heap and page allocations.
+ This can be useful to test init_on_alloc and init_on_free features.
-config TEST_STACKINIT
- tristate "Test level of stack variable initialization"
+ If unsure, say N.
+
+config TEST_HMM
+ tristate "Test HMM (Heterogeneous Memory Management)"
+ depends on TRANSPARENT_HUGEPAGE
+ depends on DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
help
- Test if the kernel is zero-initializing stack variables and
- padding. Coverage is controlled by compiler flags,
- CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
- or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+ This is a pseudo device driver solely for testing HMM.
+ Say M here if you want to build the HMM test module.
+ Doing so will allow you to run tools/testing/selftest/vm/hmm-tests.
+
+ If unsure, say N.
+
+config TEST_FREE_PAGES
+ tristate "Test freeing pages"
+ help
+ Test that a memory leak does not occur due to a race between
+ freeing a block of pages and a speculative page reference.
+ Loading this module is safe if your kernel has the bug fixed.
+ If the bug is not fixed, it will leak gigabytes of memory and
+ probably OOM your system.
+
+config TEST_FPU
+ tristate "Test floating point operations in kernel space"
+ depends on X86 && !KCOV_INSTRUMENT_ALL
+ help
+ Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu
+ which will trigger a sequence of floating point operations. This is used
+ for self-testing floating point control register setting in
+ kernel_fpu_begin().
+
+ If unsure, say N.
+
+config TEST_CLOCKSOURCE_WATCHDOG
+ tristate "Test clocksource watchdog in kernel space"
+ depends on CLOCKSOURCE_WATCHDOG
+ help
+ Enable this option to create a kernel module that will trigger
+ a test of the clocksource watchdog. This module may be loaded
+ via modprobe or insmod in which case it will run upon being
+ loaded, or it may be built in, in which case it will run
+ shortly after boot.
+
+ If unsure, say N.
+
+config TEST_OBJPOOL
+ tristate "Test module for correctness and stress of objpool"
+ default n
+ depends on m && DEBUG_KERNEL
+ help
+ This builds the "test_objpool" module that should be used for
+	  correctness verification and concurrent testing of object
+	  allocation and reclamation.
If unsure, say N.
endif # RUNTIME_TESTING_MENU
+config ARCH_USE_MEMTEST
+ bool
+ help
+ An architecture should select this when it uses early_memtest()
+ during the boot process.
+
config MEMTEST
bool "Memtest"
- ---help---
+ depends on ARCH_USE_MEMTEST
+ help
This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
+ to be set and executed.
memtest=0, means disabled; -- default
memtest=1, means do 1 test pattern;
...
memtest=17, means do 17 test patterns.
If you are unsure how to answer this question, answer N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
+
+
+config HYPERV_TESTING
+ bool "Microsoft Hyper-V driver testing"
+ default n
+ depends on HYPERV && DEBUG_FS
+ help
+ Select this option to enable Hyper-V vmbus testing.
+
+endmenu # "Kernel Testing and Coverage"
+
+menu "Rust hacking"
+
+config RUST_DEBUG_ASSERTIONS
+ bool "Debug assertions"
+ depends on RUST
help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
+ Enables rustc's `-Cdebug-assertions` codegen option.
+
+ This flag lets you turn `cfg(debug_assertions)` conditional
+ compilation on or off. This can be used to enable extra debugging
+ code in development but not in production. For example, it controls
+ the behavior of the standard library's `debug_assert!` macro.
+
+ Note that this will apply to all Rust code, including `core`.
If unsure, say N.
-source "samples/Kconfig"
+config RUST_OVERFLOW_CHECKS
+ bool "Overflow checks"
+ default y
+ depends on RUST
+ help
+ Enables rustc's `-Coverflow-checks` codegen option.
-source "lib/Kconfig.kgdb"
+ This flag allows you to control the behavior of runtime integer
+ overflow. When overflow-checks are enabled, a Rust panic will occur
+ on overflow.
-source "lib/Kconfig.ubsan"
+ Note that this will apply to all Rust code, including `core`.
-config ARCH_HAS_DEVMEM_IS_ALLOWED
- bool
+ If unsure, say Y.
-config STRICT_DEVMEM
- bool "Filter access to /dev/mem"
- depends on MMU && DEVMEM
- depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if PPC || X86 || ARM64
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- of memory, including kernel and userspace memory. Accidental
- access to this is obviously disastrous, but specific access can
- be used by people debugging the kernel. Note that with PAT support
- enabled, even in this case there are restrictions on /dev/mem
- use due to the cache aliasing requirements.
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+ Controls how `build_error!` and `build_assert!` are handled during the build.
- If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
- file only allows userspace access to PCI space and the BIOS code and
- data regions. This is sufficient for dosemu and X and all common
- users of /dev/mem.
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
- If in doubt, say Y.
+ This should not happen, thus by default the build is aborted. However,
+ as an escape hatch, you can choose Y here to ignore them during build
+ and let the check be carried out at runtime (with `panic!` being called if
+ the check fails).
-config IO_STRICT_DEVMEM
- bool "Filter I/O access to /dev/mem"
- depends on STRICT_DEVMEM
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- io-memory regardless of whether a driver is actively using that
- range. Accidental access to this is obviously disastrous, but
- specific access can be used by people debugging kernel drivers.
+ If unsure, say N.
- If this option is switched on, the /dev/mem file only allows
- userspace access to *idle* io-memory ranges (see /proc/iomem) This
- may break traditional users of /dev/mem (dosemu, legacy X, etc...)
- if the driver using a given range cannot be disabled.
+config RUST_KERNEL_DOCTESTS
+ bool "Doctests for the `kernel` crate" if !KUNIT_ALL_TESTS
+ depends on RUST && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the documentation tests of the `kernel` crate
+ as KUnit tests.
- If in doubt, say Y.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
-source "arch/$(SRCARCH)/Kconfig.debug"
+ If unsure, say N.
+
+endmenu # "Rust"
endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..e6eda054ab27 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+
# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
bool
@@ -6,139 +7,220 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool
+config HAVE_ARCH_KASAN_HW_TAGS
+ bool
+
+config HAVE_ARCH_KASAN_VMALLOC
+ bool
+
+config ARCH_DISABLE_KASAN_INLINE
+ bool
+ help
+ Disables both inline and stack instrumentation. Selected by
+ architectures that do not support these instrumentation types.
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
-config KASAN
- bool "KASAN: runtime memory debugger"
- depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
- (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+# This option is only required for software KASAN modes.
+# Old GCC versions do not have proper support for no_sanitize_address.
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
+menuconfig KASAN
+ bool "KASAN: dynamic memory safety error detector"
+ depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
+ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
+ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
+ HAVE_ARCH_KASAN_HW_TAGS
+ depends on SYSFS && !SLUB_TINY
+ select STACKDEPOT_ALWAYS_INIT
help
- Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
- designed to find out-of-bounds accesses and use-after-free bugs.
+ Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
+ error detector designed to find out-of-bounds and use-after-free bugs.
+
See Documentation/dev-tools/kasan.rst for details.
+ For better error reports, also enable CONFIG_STACKTRACE.
+
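Purely for illustration, the class of bug KASAN reports looks like the slab out-of-bounds and use-after-free accesses below; this is a hypothetical snippet, not part of the KASAN test suite.

    #include <linux/slab.h>

    static void kasan_demo(void)
    {
            char *p = kmalloc(8, GFP_KERNEL);

            if (!p)
                    return;
            p[8] = 'x';     /* slab out-of-bounds write, reported by KASAN */
            kfree(p);
            p[0] = 'y';     /* use-after-free write, also reported */
    }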
+if KASAN
+
+config CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=kernel-address -mllvm -asan-kernel-mem-intrinsic-prefix=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=kernel-address --param asan-kernel-mem-intrinsic-prefix=1))
+ # Don't define it if we don't need it: compilation of the test uses
+ # this variable to decide how the compiler should treat builtins.
+ depends on !KASAN_HW_TAGS
+ help
+ The compiler is able to prefix memintrinsics with __asan or __hwasan.
+
choice
prompt "KASAN mode"
- depends on KASAN
default KASAN_GENERIC
help
- KASAN has two modes: generic KASAN (similar to userspace ASan,
- x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
- software tag-based KASAN (a version based on software memory
- tagging, arm64 only, similar to userspace HWASan, enabled with
- CONFIG_KASAN_SW_TAGS).
- Both generic and tag-based KASAN are strictly debugging features.
+ KASAN has three modes:
+
+ 1. Generic KASAN (supported by many architectures, enabled with
+ CONFIG_KASAN_GENERIC, similar to userspace ASan),
+ 2. Software Tag-Based KASAN (arm64 only, based on software memory
+ tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace
+ HWASan), and
+ 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory
+ tagging, enabled with CONFIG_KASAN_HW_TAGS).
+
+ See Documentation/dev-tools/kasan.rst for details about each mode.
config KASAN_GENERIC
- bool "Generic mode"
+ bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
- select SLUB_DEBUG if SLUB
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ select SLUB_DEBUG
select CONSTRUCTORS
- select STACKDEPOT
help
- Enables generic KASAN mode.
- Supported in both GCC and Clang. With GCC it requires version 4.9.2
- or later for basic support and version 5.0 or later for detection of
- out-of-bounds accesses for stack and global variables and for inline
- instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
- version 3.7.0 or later and it doesn't support detection of
- out-of-bounds accesses for global variables yet.
- This mode consumes about 1/8th of available memory at kernel start
- and introduces an overhead of ~x1.5 for the rest of the allocations.
+ Enables Generic KASAN.
+
+ Requires GCC 8.3.0+ or Clang.
+
+ Consumes about 1/8th of available memory at kernel start and adds an
+ overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- For better error detection enable CONFIG_STACKTRACE.
- Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
config KASAN_SW_TAGS
- bool "Software tag-based mode"
+ bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
- select SLUB_DEBUG if SLUB
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ select SLUB_DEBUG
select CONSTRUCTORS
- select STACKDEPOT
help
- Enables software tag-based KASAN mode.
- This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64.
- This mode requires Clang version 7.0.0 or later.
- This mode consumes about 1/16th of available memory at kernel start
- and introduces an overhead of ~20% for the rest of the allocations.
- This mode may potentially introduce problems relating to pointer
- casting and comparison, as it embeds tags into the top byte of each
- pointer.
- For better error detection enable CONFIG_STACKTRACE.
- Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
+ Enables Software Tag-Based KASAN.
+
+ Requires GCC 11+ or Clang.
+
+ Supported only on arm64 CPUs and relies on Top Byte Ignore.
+
+ Consumes about 1/16th of available memory at kernel start and
+ adds an overhead of ~20% for dynamic allocations.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
+
+config KASAN_HW_TAGS
+ bool "Hardware Tag-Based KASAN"
+ depends on HAVE_ARCH_KASAN_HW_TAGS
+ help
+ Enables Hardware Tag-Based KASAN.
+
+ Requires GCC 10+ or Clang 12+.
+
+ Supported only on arm64 CPUs starting from ARMv8.5 and relies on
+ Memory Tagging Extension and Top Byte Ignore.
+
+ Consumes about 1/32nd of available memory.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
endchoice
choice
prompt "Instrumentation type"
- depends on KASAN
- default KASAN_OUTLINE
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
+ default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE
config KASAN_OUTLINE
bool "Outline instrumentation"
help
- Before every memory access compiler insert function call
- __asan_load*/__asan_store*. These functions performs check
- of shadow memory. This is slower than inline instrumentation,
- however it doesn't bloat size of kernel's .text section so
- much as inline does.
+ Makes the compiler insert function calls that check whether the memory
+ is accessible before each memory access. Slower than KASAN_INLINE, but
+ does not bloat the size of the kernel's .text section so much.
config KASAN_INLINE
bool "Inline instrumentation"
+ depends on !ARCH_DISABLE_KASAN_INLINE
help
- Compiler directly inserts code checking shadow memory before
- memory accesses. This is faster than outline (in some workloads
- it gives about x2 boost over outline instrumentation), but
- make kernel's .text size much bigger.
- For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
+ Makes the compiler directly insert memory accessibility checks before
+ each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for
+ some workloads), but makes the kernel's .text size much bigger.
endchoice
-config KASAN_STACK_ENABLE
- bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- default !(CLANG_VERSION < 90000)
- depends on KASAN
+config KASAN_STACK
+ bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
+ depends on !ARCH_DISABLE_KASAN_INLINE
+ default y if CC_IS_GCC
help
- The LLVM stack address sanitizer has a know problem that
- causes excessive stack usage in a lot of functions, see
- https://bugs.llvm.org/show_bug.cgi?id=38809
- Disabling asan-stack makes it safe to run kernels build
- with clang-8 with KASAN enabled, though it loses some of
- the functionality.
- This feature is always disabled when compile-testing with clang-8
- or earlier to avoid cluttering the output in stack overflow
- warnings, but clang-8 users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc and later clang versions it is
- assumed to always be safe to use and enabled by default.
+ Enables stack instrumentation and thus KASAN's ability to detect
+ out-of-bounds bugs in stack variables.
-config KASAN_STACK
- int
- default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
- default 0
+ With Clang, stack instrumentation has a problem that causes excessive
+ stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ with Clang, this option is deemed unsafe.
+
+ This option is always disabled when compile-testing with Clang to
+ avoid cluttering the log with stack overflow warnings.
+
+ With GCC, enabling stack instrumentation is assumed to be safe.
+
+ If the architecture disables inline instrumentation via
+ ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled
+ as well, as it adds inline-style instrumentation that is run
+ unconditionally.
+
+config KASAN_VMALLOC
+ bool "Check accesses to vmalloc allocations"
+ depends on HAVE_ARCH_KASAN_VMALLOC
+ help
+ Makes KASAN check the validity of accesses to vmalloc allocations.
+
+ With software KASAN modes, all types of vmalloc allocations are
+ checked. Enabling this option leads to higher memory usage.
+
+ With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings
+ are checked. There is no additional memory usage.
+
+config KASAN_KUNIT_TEST
+ tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
+ depends on KASAN && KUNIT && TRACEPOINTS
+ default KUNIT_ALL_TESTS
+ help
+ A KUnit-based KASAN test suite. Triggers different kinds of
+ out-of-bounds and use-after-free accesses. Useful for testing whether
+ KASAN can detect certain bug types.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
-config KASAN_S390_4_LEVEL_PAGING
- bool "KASan: use 4-level paging"
- depends on KASAN && S390
+config KASAN_MODULE_TEST
+ tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
+ depends on m && KASAN && !KASAN_HW_TAGS
help
- Compiling the kernel with KASan disables automatic 3-level vs
- 4-level paging selection. 3-level paging is used by default (up
- to 3TB of RAM with KASan enabled). This options allows to force
- 4-level paging instead.
-
-config TEST_KASAN
- tristate "Module for testing KASAN for bug detection"
- depends on m && KASAN
+ A part of the KASAN test suite that is not integrated with KUnit.
+ Incompatible with Hardware Tag-Based KASAN.
+
+config KASAN_EXTRA_INFO
+ bool "Record and report more information"
+ depends on KASAN
help
- This is a test module doing various nasty things like
- out of bounds accesses, use after free. It is useful for testing
- kernel debugging features like KASAN.
+ Record and report more information to help us find the cause of the
+ bug and to help us correlate the error with other system events.
+
+ Currently, the CPU number and timestamp are additionally
+ recorded for each heap block at allocation and free time, and
+ 8 bytes will be added to each metadata structure that records
+ allocation or free information.
+
+ In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add
+ 16 bytes of additional memory consumption, and each kmalloc-32
+ object will add 8 bytes of additional memory consumption, not
+ affecting other larger objects.
+
+ In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size
+ boot parameter, it will add 8 * stack_ring_size bytes of additional
+ memory consumption.
+
+endif # KASAN
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
new file mode 100644
index 000000000000..609ddfc73de5
--- /dev/null
+++ b/lib/Kconfig.kcsan
@@ -0,0 +1,257 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KCSAN
+ bool
+
+config HAVE_KCSAN_COMPILER
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1))
+ help
+ For the list of compilers that support KCSAN, please see
+ <file:Documentation/dev-tools/kcsan.rst>.
+
+menuconfig KCSAN
+ bool "KCSAN: dynamic data race detector"
+ depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
+ depends on DEBUG_KERNEL && !KASAN
+ select CONSTRUCTORS
+ select STACKTRACE
+ help
+ The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
+ data-race detector that relies on compile-time instrumentation.
+ KCSAN uses a watchpoint-based sampling approach to detect races.
+
+ While KCSAN's primary purpose is to detect data races, it
+ also provides assertions to check data access constraints.
+ These assertions can expose bugs that do not manifest as
+ data races.
+
+ See <file:Documentation/dev-tools/kcsan.rst> for more details.
+
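As a hedged illustration of what the watchpoint mechanism flags, consider the same variable being updated with a plain access in one context while read elsewhere; the names below are hypothetical.

    #include <linux/compiler.h>

    static int shared_counter;

    /* Called concurrently from different CPUs. */
    static void writer(void)
    {
            shared_counter++;       /* plain read-modify-write: a data race */
    }

    static int reader(void)
    {
            return READ_ONCE(shared_counter);   /* marked (atomic) read */
    }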
+if KCSAN
+
+config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+ help
+ The compiler instruments plain compound read-write operations
+ differently (++, --, +=, -=, |=, &=, etc.), which allows KCSAN to
+ distinguish them from other plain accesses. This is currently
+ supported by Clang 12 or later.
+
+config KCSAN_VERBOSE
+ bool "Show verbose reports with more information about system state"
+ depends on PROVE_LOCKING
+ help
+ If enabled, reports show more information about the system state that
+ may help better analyze and debug races. This includes held locks and
+ IRQ trace events.
+
+ While this option should generally be benign, we call into more
+ external functions on report generation; if a race report is
+ generated from any one of them, system stability may suffer due to
+ deadlocks or recursion. If in doubt, say N.
+
+config KCSAN_SELFTEST
+ bool "Perform short selftests on boot"
+ default y
+ help
+ Run KCSAN selftests on boot. On test failure, causes the kernel to
+ panic. Recommended to be enabled, ensuring critical functionality
+ works as intended.
+
+config KCSAN_KUNIT_TEST
+ tristate "KCSAN test for integrated runtime behaviour" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ select TORTURE_TEST
+ help
+ KCSAN test focusing on behaviour of the integrated runtime. Tests
+ various race scenarios, and verifies the reports generated to
+ console. Makes use of KUnit for test organization, and the Torture
+ framework for test thread control.
+
+ Each test case may run for up to KCSAN_REPORT_ONCE_IN_MS
+ milliseconds. Test run duration may be reduced by building the
+ kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a
+ lower-than-default value.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+config KCSAN_EARLY_ENABLE
+ bool "Early enable during boot"
+ default y
+ help
+ If KCSAN should be enabled globally as soon as possible. KCSAN can
+ later be enabled/disabled via debugfs.
+
+config KCSAN_NUM_WATCHPOINTS
+ int "Number of available watchpoints"
+ default 64
+ help
+ Total number of available watchpoints. An address range maps into a
+ specific watchpoint slot as specified in kernel/kcsan/encoding.h.
+ Although a larger number of watchpoints may not be usable due to
+ the limited number of CPUs, a larger value helps to improve
+ performance by reducing cache-line contention.
+ conservative value; we should almost never observe "no_capacity"
+ events (see /sys/kernel/debug/kcsan).
+
+config KCSAN_UDELAY_TASK
+ int "Delay in microseconds (for tasks)"
+ default 80
+ help
+ For tasks, the microsecond delay after setting up a watchpoint.
+
+config KCSAN_UDELAY_INTERRUPT
+ int "Delay in microseconds (for interrupts)"
+ default 20
+ help
+ For interrupts, the microsecond delay after setting up a watchpoint.
+ Interrupts have tighter latency requirements, and their delay should
+ be lower than for tasks.
+
+config KCSAN_DELAY_RANDOMIZE
+ bool "Randomize above delays"
+ default y
+ help
+ If delays should be randomized, where the maximum is KCSAN_UDELAY_*.
+ If false, the chosen delays are always the KCSAN_UDELAY_* values
+ as defined above.
+
+config KCSAN_SKIP_WATCH
+ int "Skip instructions before setting up watchpoint"
+ default 4000
+ help
+ The number of per-CPU memory operations to skip, before another
+ watchpoint is set up, i.e. one in KCSAN_SKIP_WATCH per-CPU
+ memory operations are used to set up a watchpoint. A smaller value
+ results in more aggressive race detection, whereas a larger value
+ improves system performance at the cost of missing some races.
+
+config KCSAN_SKIP_WATCH_RANDOMIZE
+ bool "Randomize watchpoint instruction skip count"
+ default y
+ help
+ If instruction skip count should be randomized, where the maximum is
+ KCSAN_SKIP_WATCH. If false, the chosen value is always
+ KCSAN_SKIP_WATCH.
+
+config KCSAN_INTERRUPT_WATCHER
+ bool "Interruptible watchers" if !KCSAN_STRICT
+ default KCSAN_STRICT
+ help
+ If enabled, a task that set up a watchpoint may be interrupted while
+ delayed. This option will allow KCSAN to detect races between
+ interrupted tasks and other threads of execution on the same CPU.
+
+ Currently disabled by default, because not all safe per-CPU access
+ primitives and patterns may be accounted for, and therefore could
+ result in false positives.
+
+config KCSAN_REPORT_ONCE_IN_MS
+ int "Duration in milliseconds, in which any given race is only reported once"
+ default 3000
+ help
+ Any given race is only reported once in the defined time window.
+ Different races may still generate reports within a duration that is
+ smaller than the duration defined here. This allows rate limiting
+ reporting to avoid flooding the console with reports. Setting this
+ to 0 disables rate limiting.
+
+# The main purpose of the below options is to control reported data races; they
+# are not expected to be switched frequently by non-testers or at runtime.
+# The defaults are chosen to be conservative, and can miss certain bugs.
+
+config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ bool "Report races of unknown origin"
+ default y
+ help
+ If KCSAN should report races where only one access is known, and the
+ conflicting access is of unknown origin. This type of race is
+ reported if it was only possible to infer a race due to a data value
+ change while an access is being delayed on a watchpoint.
+
+config KCSAN_STRICT
+ bool "Strict data-race checking"
+ help
+ KCSAN will report data races with the strictest possible rules, which
+ closely aligns with the rules defined by the Linux-kernel memory
+ consistency model (LKMM).
+
+config KCSAN_WEAK_MEMORY
+ bool "Enable weak memory modeling to detect missing memory barriers"
+ default y
+ depends on KCSAN_STRICT
+ # We can either let objtool nop __tsan_func_{entry,exit}() and builtin
+ # atomics instrumentation in .noinstr.text, or use a compiler that can
+ # implement __no_kcsan to really remove all instrumentation.
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ CC_IS_GCC || CLANG_VERSION >= 140000
+ select OBJTOOL if HAVE_NOINSTR_HACK
+ help
+ Enable support for modeling a subset of weak memory, which allows
+ detecting a subset of data races due to missing memory barriers.
+
+ Depends on KCSAN_STRICT, because the options strengthening certain
+ plain accesses by default (depending on !KCSAN_STRICT) reduce the
+ ability to detect any data races involving reordered accesses, in
+ particular reordered writes.
+
+ Weak memory modeling relies on additional instrumentation and may
+ affect performance.
+
+config KCSAN_REPORT_VALUE_CHANGE_ONLY
+ bool "Only report races where watcher observed a data value change"
+ default y
+ depends on !KCSAN_STRICT
+ help
+ If enabled and a conflicting write is observed via a watchpoint, but
+ the data value of the memory location was observed to remain
+ unchanged, do not report the data race.
+
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+ bool "Assume that plain aligned writes up to word size are atomic"
+ default y
+ depends on !KCSAN_STRICT
+ help
+ Assume that plain aligned writes up to word size are atomic by
+ default, and also not subject to other unsafe compiler optimizations
+ resulting in data races. This will cause KCSAN to not report data
+ races due to conflicts where the only plain accesses are aligned
+ writes up to word size: conflicts between marked reads and plain
+ aligned writes up to word size will not be reported as data races;
+ notice that data races between two conflicting plain aligned writes
+ will also not be reported.
+
+config KCSAN_IGNORE_ATOMICS
+ bool "Do not instrument marked atomic accesses"
+ depends on !KCSAN_STRICT
+ help
+ Never instrument marked atomic accesses. This option can be used for
+ additional filtering. Conflicting marked atomic reads and plain
+ writes will never be reported as a data race, however, will cause
+ plain reads and marked writes to result in "unknown origin" reports.
+ If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+ races where at least one access is marked atomic will never be
+ reported.
+
+ Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned
+ accesses, conflicting marked atomic reads and plain writes will not
+ be reported as data races; however, unlike that option, data races
+ due to two conflicting plain writes will be reported (aligned and
+ unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+
+config KCSAN_PERMISSIVE
+ bool "Enable all additional permissive rules"
+ depends on KCSAN_REPORT_VALUE_CHANGE_ONLY
+ help
+ Enable additional permissive rules to ignore certain classes of data
+ races (also see kernel/kcsan/permissive.h). None of the permissive
+ rules imply that such data races are generally safe, but can be used
+ to further reduce reported data races due to data-racy patterns
+ common across the kernel.
+
+endif # KCSAN
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
new file mode 100644
index 000000000000..6fbbebec683a
--- /dev/null
+++ b/lib/Kconfig.kfence
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KFENCE
+ bool
+
+menuconfig KFENCE
+ bool "KFENCE: low-overhead sampling-based memory safety error detector"
+ depends on HAVE_ARCH_KFENCE
+ select STACKTRACE
+ select IRQ_WORK
+ help
+ KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
+ access, use-after-free, and invalid-free errors. KFENCE is designed
+ to have negligible cost to permit enabling it in production
+ environments.
+
+ See <file:Documentation/dev-tools/kfence.rst> for more details.
+
+ Note that KFENCE is not a substitute for explicit testing with tools
+ such as KASAN. KFENCE can detect a subset of bugs that KASAN can
+ detect, albeit at very different performance profiles. If you can
+ afford to use KASAN, continue using KASAN, for example in test
+ environments. If your kernel targets production use, and cannot
+ enable KASAN due to its cost, consider using KFENCE.
+
+if KFENCE
+
+config KFENCE_SAMPLE_INTERVAL
+ int "Default sample interval in milliseconds"
+ default 100
+ help
+ The KFENCE sample interval determines the frequency with which heap
+ allocations will be guarded by KFENCE. May be overridden via boot
+ parameter "kfence.sample_interval".
+
+ Set this to 0 to disable KFENCE by default, in which case only
+ setting "kfence.sample_interval" to a non-zero value enables KFENCE.
+
+config KFENCE_NUM_OBJECTS
+ int "Number of guarded objects available"
+ range 1 65535
+ default 255
+ help
+ The number of guarded objects available. For each KFENCE object, 2
+ pages are required: one containing the object and two adjacent
+ ones used as guard pages (guard pages are shared between
+ neighbouring objects).
+
+config KFENCE_DEFERRABLE
+ bool "Use a deferrable timer to trigger allocations"
+ help
+ Use a deferrable timer to trigger allocations. This avoids forcing
+ CPU wake-ups if the system is idle, at the risk of a less predictable
+ sample interval.
+
+ Warning: The KUnit test suite fails with this option enabled - due to
+ the unpredictability of the sample interval!
+
+ Say N if you are unsure.
+
+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations" if EXPERT
+ depends on JUMP_LABEL
+ help
+ Use static keys (static branches) to set up KFENCE allocations. This
+ option is only recommended when using very large sample intervals, or
+ performance has carefully been evaluated with this option.
+
+ Using static keys comes with trade-offs that need to be carefully
+ evaluated given target workloads and system architectures. Notably,
+ enabling and disabling static keys invoke IPI broadcasts, the latency
+ and impact of which is much harder to predict than a dynamic branch.
+
+ Say N if you are unsure.
+
+config KFENCE_STRESS_TEST_FAULTS
+ int "Stress testing of fault handling and error reporting" if EXPERT
+ default 0
+ help
+ The inverse probability with which to randomly protect KFENCE object
+ pages, resulting in spurious use-after-frees. The main purpose of
+ this option is to stress test KFENCE with concurrent error reports
+ and allocations/frees. A value of 0 disables stress testing logic.
+
+ Only for KFENCE testing; set to 0 if you are not a KFENCE developer.
+
+config KFENCE_KUNIT_TEST
+ tristate "KFENCE integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KFENCE, testing various error detection scenarios with
+ various allocation types, and checking that reports are correctly
+ output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif # KFENCE
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index bbe397df04a3..3b9a44008433 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,6 +3,11 @@
config HAVE_ARCH_KGDB
bool
+# set if the architecture has its own kgdb_arch_handle_qxfer_pkt
+# function to enable the gdb stub to handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
@@ -19,6 +24,21 @@ menuconfig KGDB
if KGDB
+config KGDB_HONOUR_BLOCKLIST
+ bool "KGDB: use kprobe blocklist to prohibit unsafe breakpoints"
+ depends on HAVE_KPROBES
+ depends on MODULES
+ select KPROBES
+ default y
+ help
+ If set to Y the debug core will use the kprobe blocklist to
+ identify symbols where it is unsafe to set breakpoints.
+ In particular this disallows instrumentation of functions
+ called during debug trap handling and thus makes it very
+ difficult to inadvertently provoke recursive trap handling.
+
+ If unsure, say Y.
+
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
@@ -64,9 +84,9 @@ config KGDB_LOW_LEVEL_TRAP
depends on X86 || MIPS
default n
help
- This will add an extra call back to kgdb for the breakpoint
- exception handler which will allow kgdb to step through a
- notify handler.
+ This will add an extra call back to kgdb for the breakpoint
+ exception handler which will allow kgdb to step through a
+ notify handler.
config KGDB_KDB
bool "KGDB_KDB: include kdb frontend for kgdb"
@@ -96,12 +116,12 @@ config KDB_DEFAULT_ENABLE
The config option merely sets the default at boot time. Both
issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
- setting with kdb.cmd_enable=X kernel command line option will
+ setting with kdb.cmd_enable=X kernel command line option will
override the default settings.
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
- depends on VT && KGDB_KDB
+ depends on VT && KGDB_KDB && !PARISC
default n
help
KDB can use a PS/2 type keyboard for an input device
@@ -124,4 +144,22 @@ config KDB_CONTINUE_CATASTROPHIC
CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
If you are not sure, say 0.
+config ARCH_HAS_EARLY_DEBUG
+ bool
+ default n
+ help
+ If an architecture can definitely handle entering the debugger
+ when early_param's are parsed then it should select this config.
+ Otherwise, if "kgdbwait" is passed on the kernel command line it
+ won't actually be processed until dbg_late_init() just after the
+ call to kgdb_arch_late() is made.
+
+ NOTE: Even if this isn't selected by an architecture we will
+ still try to register kgdb to handle breakpoints and crashes
+ when early_param's are parsed; we just won't act on the
+ "kgdbwait" parameter until dbg_late_init(). If you get a
+ crash and try to drop into kgdb somewhere between these two
+ places you might or might not end up being able to use kgdb
+ depending on exactly how far along the architecture has initialized.
+
endif # KGDB
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
new file mode 100644
index 000000000000..0541d7b079cc
--- /dev/null
+++ b/lib/Kconfig.kmsan
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARCH_KMSAN
+ bool
+
+config HAVE_KMSAN_COMPILER
+ # Clang versions <14.0.0 also support -fsanitize=kernel-memory, but not
+ # all the features necessary to build the kernel with KMSAN.
+ depends on CC_IS_CLANG && CLANG_VERSION >= 140000
+ def_bool $(cc-option,-fsanitize=kernel-memory -mllvm -msan-disable-checks=1)
+
+config KMSAN
+ bool "KMSAN: detector of uninitialized values use"
+ depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
+ depends on DEBUG_KERNEL && !KASAN && !KCSAN
+ depends on !PREEMPT_RT
+ select STACKDEPOT
+ select STACKDEPOT_ALWAYS_INIT
+ help
+ KernelMemorySanitizer (KMSAN) is a dynamic detector of uses of
+ uninitialized values in the kernel. It is based on compiler
+ instrumentation provided by Clang and thus requires Clang to build.
+
+ An important note is that KMSAN is not intended for production use,
+ because it drastically increases kernel memory footprint and slows
+ the whole system down.
+
+ See <file:Documentation/dev-tools/kmsan.rst> for more details.
+
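A hedged example of the bug class: branching on a local variable that was never initialized is the kind of use KMSAN reports at runtime; the function below is hypothetical and deliberately buggy.

    #include <linux/printk.h>

    static void kmsan_demo(void)
    {
            int flag;       /* intentionally left uninitialized */

            if (flag)       /* use of an uninitialized value, reported by KMSAN */
                    pr_info("flag was set\n");
    }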
+if KMSAN
+
+config HAVE_KMSAN_PARAM_RETVAL
+ # -fsanitize-memory-param-retval is supported only by Clang >= 14.
+ depends on HAVE_KMSAN_COMPILER
+ def_bool $(cc-option,-fsanitize=kernel-memory -fsanitize-memory-param-retval)
+
+config KMSAN_CHECK_PARAM_RETVAL
+ bool "Check for uninitialized values passed to and returned from functions"
+ default y
+ depends on HAVE_KMSAN_PARAM_RETVAL
+ help
+ If the compiler supports -fsanitize-memory-param-retval, KMSAN will
+ eagerly check every function parameter passed by value and every
+ function return value.
+
+ Disabling KMSAN_CHECK_PARAM_RETVAL will result in tracking shadow for
+ function parameters and return values across function borders. This
+ is a more relaxed mode, but it generates more instrumentation code and
+ may potentially report errors in corner cases when non-instrumented
+ functions call instrumented ones.
+
+config KMSAN_KUNIT_TEST
+ tristate "KMSAN integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KMSAN, testing various error detection scenarios,
+ and checking that reports are correctly output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif # KMSAN
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 0e04fcb3ab3d..59e21bfec188 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -2,24 +2,149 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config UBSAN
+menuconfig UBSAN
bool "Undefined behaviour sanity checker"
help
- This option enables undefined behaviour sanity checker
+ This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
- behaviours in runtime. Various types of checks may be enabled
- via boot parameter ubsan_handle
- (see: Documentation/dev-tools/ubsan.rst).
+ behaviours at runtime. For more details, see:
+ Documentation/dev-tools/ubsan.rst
+
+if UBSAN
+
+config UBSAN_TRAP
+ bool "Abort on Sanitizer warnings (smaller kernel but less verbose)"
+ depends on !COMPILE_TEST
+ help
+ Building kernels with Sanitizer features enabled tends to grow
+ the kernel size by around 5%, due to adding all the debugging
+ text on failure paths. To avoid this, Sanitizer instrumentation
+ can just issue a trap. This reduces the kernel size overhead but
+ turns all warnings (including potentially harmless conditions)
+ into full exceptions that abort the running kernel code
+ (regardless of context, locks held, etc), which may destabilize
+ the system. For some system builders this is an acceptable
+ trade-off.
+
+ Also note that selecting Y will cause your kernel to Oops
+ with an "illegal instruction" error with no further details
+ when a UBSAN violation occurs. (Except on arm64, which will
+ report which Sanitizer failed.) This may make it hard to
+ determine whether an Oops was caused by UBSAN or to figure
+ out the details of a UBSAN violation. It makes the kernel log
+ output less useful for bug reports.
+
+config CC_HAS_UBSAN_BOUNDS_STRICT
+ def_bool $(cc-option,-fsanitize=bounds-strict)
+ help
+ The -fsanitize=bounds-strict option is only available on GCC,
+ but uses the more strict handling of arrays that includes knowledge
+ of flexible arrays, which is comparable to Clang's regular
+ -fsanitize=bounds.
+
+config CC_HAS_UBSAN_ARRAY_BOUNDS
+ def_bool $(cc-option,-fsanitize=array-bounds)
+ help
+ Under Clang, the -fsanitize=bounds option is actually composed
+ of two more specific options, -fsanitize=array-bounds and
+ -fsanitize=local-bounds. However, -fsanitize=local-bounds can
+ only be used when trap mode is enabled. (See also the help for
+ CONFIG_LOCAL_BOUNDS.) Explicitly check for -fsanitize=array-bounds
+ so that we can build up the options needed for UBSAN_BOUNDS
+ with or without UBSAN_TRAP.
+
+config UBSAN_BOUNDS
+ bool "Perform array index bounds checking"
+ default UBSAN
+ depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS_STRICT
+ help
+ This option enables detection of directly indexed out of bounds
+ array accesses, where the array size is known at compile time.
+ Note that this does not protect array overflows via bad calls
+ to the {str,mem}*cpy() family of functions (that is addressed
+ by CONFIG_FORTIFY_SOURCE).
+
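A minimal sketch of the directly indexed, compile-time-known out-of-bounds access this instrumentation catches; the array and index here are assumptions for illustration.

    static int table[4];

    static int ubsan_bounds_demo(int i)
    {
            /*
             * With i == 4 this indexes one past the end of 'table'; because
             * the array size is known at compile time, -fsanitize=bounds
             * emits a runtime warning for the access.
             */
            return table[i];
    }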
+config UBSAN_BOUNDS_STRICT
+ def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_BOUNDS_STRICT
+ help
+ GCC's bounds sanitizer. This option is used to select the
+ correct options in Makefile.ubsan.
+
+config UBSAN_ARRAY_BOUNDS
+ def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_ARRAY_BOUNDS
+ help
+ Clang's array bounds sanitizer. This option is used to select
+ the correct options in Makefile.ubsan.
+
+config UBSAN_LOCAL_BOUNDS
+ def_bool UBSAN_ARRAY_BOUNDS && UBSAN_TRAP
+ help
+ This option enables Clang's -fsanitize=local-bounds which traps
+ when an access through a pointer that is derived from an object
+ of a statically-known size, where an added offset (which may not
+ be known statically) is out-of-bounds. Since this option is
+ trap-only, it depends on CONFIG_UBSAN_TRAP.
+
+config UBSAN_SHIFT
+ bool "Perform checking for bit-shift overflows"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=shift)
+ help
+ This option enables -fsanitize=shift which checks for bit-shift
+ operations that overflow to the left or go negative
+ for signed types.
+
+config UBSAN_DIV_ZERO
+ bool "Perform checking for integer divide-by-zero"
+ depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+ # https://github.com/ClangBuiltLinux/linux/issues/1657
+ # https://github.com/llvm/llvm-project/issues/56289
+ depends on !CC_IS_CLANG
+ help
+ This option enables -fsanitize=integer-divide-by-zero which checks
+ for integer division by zero. This is effectively redundant with the
+ kernel's existing exception handling, though it can provide greater
+ debugging information under CONFIG_UBSAN_REPORT_FULL.
+
+config UBSAN_UNREACHABLE
+ bool "Perform checking for unreachable code"
+ # objtool already handles unreachable checking and gets angry about
+ # seeing UBSan instrumentation located in unreachable places.
+ depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION))
+ depends on $(cc-option,-fsanitize=unreachable)
+ help
+ This option enables -fsanitize=unreachable which checks for control
+ flow reaching an expected-to-be-unreachable position.
+
+config UBSAN_BOOL
+ bool "Perform checking for non-boolean values used as boolean"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=bool)
+ help
+ This option enables -fsanitize=bool which checks for boolean values being
+ loaded that are neither 0 nor 1.
+
+config UBSAN_ENUM
+ bool "Perform checking for out of bounds enum values"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=enum)
+ help
+ This option enables -fsanitize=enum which checks for values being loaded
+ into an enum that are outside the range of given values for the given enum.
+
+config UBSAN_ALIGNMENT
+ bool "Perform checking for misaligned pointer usage"
+ default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ depends on !UBSAN_TRAP && !COMPILE_TEST
+ depends on $(cc-option,-fsanitize=alignment)
+ help
+ This option enables the check of unaligned memory accesses.
+ Enabling this option on architectures that support unaligned
+ accesses may produce a lot of false positives.
config UBSAN_SANITIZE_ALL
bool "Enable instrumentation for the entire kernel"
- depends on UBSAN
depends on ARCH_HAS_UBSAN_SANITIZE_ALL
-
- # We build with -Wno-maybe-uninitilzed, but we still want to
- # use -Wmaybe-uninitilized in allmodconfig builds.
- # So dependsy bellow used to disable this option in allmodconfig
- depends on !COMPILE_TEST
default y
help
This option activates instrumentation for the entire kernel.
@@ -28,22 +153,11 @@ config UBSAN_SANITIZE_ALL
Enabling this option will get kernel image size increased
significantly.
-config UBSAN_NO_ALIGNMENT
- bool "Disable checking of pointers alignment"
- depends on UBSAN
- default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
- help
- This option disables the check of unaligned memory accesses.
- This option should be used when building allmodconfig.
- Disabling this option on architectures that support unaligned
- accesses may produce a lot of false positives.
-
-config UBSAN_ALIGNMENT
- def_bool !UBSAN_NO_ALIGNMENT
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
- depends on m && UBSAN
+ depends on m
help
This is a test module for UBSAN.
 It triggers various undefined behavior and detects it.
+
+endif # if UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index 7c3c1ad21afc..6b09731d8e61 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -3,10 +3,7 @@
# Makefile for some libs needed in the kernel.
#
-ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
-endif
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
# These files are disabled because they produce lots of non-interesting and/or
# flaky coverage that is not a function of syscall inputs. For example,
@@ -16,29 +13,30 @@ KCOV_INSTRUMENT_rbtree.o := n
KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
+KCOV_INSTRUMENT_fault-inject.o := n
+
+# string.o implements standard library functions like memset/memcpy etc.
+# Use -ffreestanding to ensure that the compiler does not try to "optimize"
+# them into calls to themselves.
+CFLAGS_string.o := -ffreestanding
# Early boot use of cmdline, don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KASAN_SANITIZE_string.o := n
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_string.o = -pg
-endif
-
-CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+CFLAGS_string.o += -fno-stack-protector
endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o \
- sha1.o chacha.o irq_regs.o argv_split.o \
- flex_proportions.o ratelimit.o show_mem.o \
+ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \
+ flex_proportions.o ratelimit.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
+ nmi_backtrace.o win_minmax.o memcat_p.o \
+ buildid.o objpool.o
lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
@@ -47,10 +45,10 @@ obj-y += lockref.o
obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
- bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o rhashtable.o \
- once.o refcount.o usercopy.o errseq.o bucket_locks.o \
- generic-radix-tree.o
+ bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \
+ percpu-refcount.o rhashtable.o base64.o \
+ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
+ generic-radix-tree.o bitmap-str.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -59,41 +57,91 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
+test_dhry-objs := dhry_1.o dhry_2.o dhry_run.o
+obj-$(CONFIG_TEST_DHRY) += test_dhry.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
+CFLAGS_test_bitops.o += -Werror
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
-obj-$(CONFIG_TEST_KASAN) += test_kasan.o
-CFLAGS_test_kasan.o += -fno-builtin
-CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
-obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
+obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
+obj-$(CONFIG_TEST_SCANF) += test_scanf.o
+
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
-obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
-obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
+# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
+GCOV_PROFILE_test_bitmap.o := n
+endif
+
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
+obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
-obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
+obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
+obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
+obj-$(CONFIG_TEST_HMM) += test_hmm.o
+obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
+
+#
+# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
+# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
+# get appended last to CFLAGS and thus override those previous compiler options.
+#
+FPU_CFLAGS := -msse -msse2
+ifdef CONFIG_CC_IS_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
+#
+# The "-msse" in the first argument is there so that the
+# -mpreferred-stack-boundary=3 build error:
+#
+# -mpreferred-stack-boundary=3 is not between 4 and 12
+#
+# can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += -mhard-float
+FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
+endif
+
+obj-$(CONFIG_TEST_FPU) += test_fpu.o
+CFLAGS_test_fpu.o += $(FPU_CFLAGS)
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
+# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
+# so we can't just use obj-$(CONFIG_KUNIT).
+ifdef CONFIG_KUNIT
+obj-y += kunit/
+endif
+
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
@@ -102,7 +150,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/
+obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
@@ -110,7 +158,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-obj-y += logic_pio.o
+lib-y += logic_pio.o
+
+lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+
+obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
@@ -118,10 +170,11 @@ obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_LIST_HARDENED) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o
obj-$(CONFIG_PACKING) += packing.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
@@ -134,6 +187,7 @@ obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
+obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
obj-$(CONFIG_XXHASH) += xxhash.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
@@ -141,6 +195,7 @@ obj-$(CONFIG_842_COMPRESS) += 842/
obj-$(CONFIG_842_DECOMPRESS) += 842/
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
obj-$(CONFIG_LZO_COMPRESS) += lzo/
@@ -159,6 +214,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o
+lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
@@ -170,6 +226,7 @@ obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
@@ -182,7 +239,11 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
-obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
+obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
+# ensure exported functions have prototypes
+CFLAGS_dynamic_debug.o := -DDYNAMIC_DEBUG_MODULE
+
+obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
obj-$(CONFIG_NLATTR) += nlattr.o
@@ -196,18 +257,18 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+obj-$(CONFIG_CLOSURES) += closure.o
+
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
-obj-$(CONFIG_MPILIB) += mpi/
+obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-obj-$(CONFIG_DDR) += jedec_ddr_data.o
-
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
@@ -215,30 +276,53 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_SG_POOL) += sg_pool.o
+obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+obj-$(CONFIG_POLYNOMIAL) += polynomial.o
+
+# stackdepot.c should not be instrumented or call instrumented functions.
+# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
+# file.
+CFLAGS_stackdepot.o += -fno-builtin
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
+# In particular, instrumenting stackdepot.c with KMSAN will result in infinite
+# recursion.
+KMSAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
+obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
+
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
- fdt_empty_tree.o
+ fdt_empty_tree.o fdt_addresses.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o
+obj-$(CONFIG_BOOT_CONFIG_EMBED) += bootconfig-data.o
+
+$(obj)/bootconfig-data.o: $(obj)/default.bconf
+
+targets += default.bconf
+filechk_defbconf = cat $(or $(real-prereqs), /dev/null)
+$(obj)/default.bconf: $(CONFIG_BOOT_CONFIG_EMBED_FILE) FORCE
+ $(call filechk,defbconf)
+
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
+obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
-hostprogs-y := gen_crc32table
-hostprogs-y += gen_crc64table
+hostprogs := gen_crc32table
+hostprogs += gen_crc64table
clean-files := crc32table.h
clean-files += crc64table.h
@@ -279,12 +363,15 @@ obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+KCSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
+obj-y += group_cpus.o
+
# GCC library routines
obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
@@ -293,3 +380,67 @@ obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
obj-$(CONFIG_OBJAGG) += objagg.o
+
+# pldmfw library
+obj-$(CONFIG_PLDMFW) += pldmfw/
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
+obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+
+obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
+
+obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
+
+# FORTIFY_SOURCE compile-time behavior tests
+TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
+TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
+TEST_FORTIFY_LOG = test_fortify.log
+
+quiet_cmd_test_fortify = TEST $@
+ cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \
+ $< $@ "$(NM)" $(CC) $(c_flags) \
+ $(call cc-disable-warning,fortify-source) \
+ -DKBUILD_EXTRA_WARN1
+
+targets += $(TEST_FORTIFY_LOGS)
+clean-files += $(TEST_FORTIFY_LOGS)
+clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS))
+$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \
+ $(src)/test_fortify/test_fortify.h \
+ $(srctree)/include/linux/fortify-string.h \
+ $(srctree)/scripts/test_fortify.sh \
+ FORCE
+ $(call if_changed,test_fortify)
+
+quiet_cmd_gen_fortify_log = GEN $@
+ cmd_gen_fortify_log = cat </dev/null $(filter-out FORCE,$^) 2>/dev/null > $@ || true
+
+targets += $(TEST_FORTIFY_LOG)
+clean-files += $(TEST_FORTIFY_LOG)
+$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
+ $(call if_changed,gen_fortify_log)
+
+# Fake dependency to trigger the fortify tests.
+ifeq ($(CONFIG_FORTIFY_SOURCE),y)
+$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
+endif
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1a19a0a93dc1..e28db8e3b58c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -28,7 +28,7 @@ static int count_argc(const char *str)
/**
* argv_free - free an argv
- * @argv - the argument vector to be freed
+ * @argv: the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(argv_free);
* @str: the string to be split
* @argcp: returned argument count
*
- * Returns an array of pointers to strings which are split out from
+ * Returns: an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 58f72b25f8e9..13da529e2e72 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -381,7 +381,7 @@ next_op:
case ASN1_OP_END_SET_ACT:
if (unlikely(!(flags & FLAG_MATCHED)))
goto tag_mismatch;
- /* fall through */
+ fallthrough;
case ASN1_OP_END_SEQ:
case ASN1_OP_END_SET_OF:
@@ -448,7 +448,7 @@ next_op:
pc += asn1_op_lengths[op];
goto next_op;
}
- /* fall through */
+ fallthrough;
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
new file mode 100644
index 000000000000..0fd3c454a468
--- /dev/null
+++ b/lib/asn1_encoder.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simple encoder primitives for ASN.1 BER/DER/CER
+ *
+ * Copyright (C) 2019 James.Bottomley@HansenPartnership.com
+ */
+
+#include <linux/asn1_encoder.h>
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+/**
+ * asn1_encode_integer() - encode positive integer to ASN.1
+ * @data: pointer to the pointer to the data
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @integer: integer to be encoded
+ *
+ * This is a simplified encoder: it currently only supports
+ * positive integers, but it should be simple enough to add the
+ * negative case if a use comes along.
+ */
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer)
+{
+ int data_len = end_data - data;
+ unsigned char *d = &data[2];
+ bool found = false;
+ int i;
+
+ if (WARN(integer < 0,
+ "BUG: integer encode only supports positive integers"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need at least 3 bytes for tag, length and integer encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+	/* remaining length available at d (the start of the integer encoding) */
+ data_len -= 2;
+
+ data[0] = _tag(UNIV, PRIM, INT);
+ if (integer == 0) {
+ *d++ = 0;
+ goto out;
+ }
+
+ for (i = sizeof(integer); i > 0 ; i--) {
+ int byte = integer >> (8 * (i - 1));
+
+ if (!found && byte == 0)
+ continue;
+
+ /*
+ * for a positive number the first byte must have bit
+ * 7 clear in two's complement (otherwise it's a
+ * negative number) so prepend a leading zero if
+ * that's not the case
+ */
+ if (!found && (byte & 0x80)) {
+ /*
+ * no check needed here, we already know we
+ * have len >= 1
+ */
+ *d++ = 0;
+ data_len--;
+ }
+
+ found = true;
+ if (data_len == 0)
+ return ERR_PTR(-EINVAL);
+
+ *d++ = byte;
+ data_len--;
+ }
+
+ out:
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_integer);
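/*
 * Illustrative usage sketch for asn1_encode_integer(): encode a small
 * positive INTEGER into a local buffer and propagate ERR_PTR() failures.
 * The buffer name and size are assumptions made for the example only.
 */
static int example_encode_positive_integer(void)
{
	unsigned char buf[16];
	unsigned char *p;

	p = asn1_encode_integer(buf, buf + sizeof(buf), 0x1234);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* buf now holds the DER bytes 02 02 12 34; p points one past them */
	return p - buf;
}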
+
+/* calculate the base 128 digit values setting the top bit of the first octet */
+static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid)
+{
+ unsigned char *data = *_data;
+ int start = 7 + 7 + 7 + 7;
+ int ret = 0;
+
+ if (*data_len < 1)
+ return -EINVAL;
+
+ /* quick case */
+ if (oid == 0) {
+ *data++ = 0x80;
+ (*data_len)--;
+ goto out;
+ }
+
+ while (oid >> start == 0)
+ start -= 7;
+
+ while (start > 0 && *data_len > 0) {
+ u8 byte;
+
+ byte = oid >> start;
+ oid = oid - (byte << start);
+ start -= 7;
+ byte |= 0x80;
+ *data++ = byte;
+ (*data_len)--;
+ }
+
+ if (*data_len > 0) {
+ *data++ = oid;
+ (*data_len)--;
+ } else {
+ ret = -EINVAL;
+ }
+
+ out:
+ *_data = data;
+ return ret;
+}
+
+/**
+ * asn1_encode_oid() - encode an oid to ASN.1
+ * @data: position to begin encoding at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @oid: array of oids
+ * @oid_len: length of oid array
+ *
+ * This encodes an OID into ASN.1 when presented as an array of OID values.
+ */
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len)
+{
+ int data_len = end_data - data;
+ unsigned char *d = data + 2;
+ int i, ret;
+
+ if (WARN(oid_len < 2, "OID must have at least two elements"))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN(oid_len > 32, "OID is too large"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+
+ /* need at least 3 bytes for tag, length and OID encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ data[0] = _tag(UNIV, PRIM, OID);
+ *d++ = oid[0] * 40 + oid[1];
+
+ data_len -= 3;
+
+ for (i = 2; i < oid_len; i++) {
+ ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_oid);
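/*
 * Illustrative sketch for asn1_encode_oid(): encode OID 2.5.4.3 (commonName).
 * The oid array contents and the buffer size are assumptions for the example.
 */
static int example_encode_oid(void)
{
	u32 oid[] = { 2, 5, 4, 3 };
	unsigned char buf[16];
	unsigned char *p;

	p = asn1_encode_oid(buf, buf + sizeof(buf), oid, ARRAY_SIZE(oid));
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* expected DER output: 06 03 55 04 03 */
	return p - buf;
}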
+
+/**
+ * asn1_encode_length() - encode a length to follow an ASN.1 tag
+ * @data: pointer to encode at
+ * @data_len: pointer to remaining length (adjusted by routine)
+ * @len: length to encode
+ *
+ * This routine can encode lengths up to 0xffffff using the ASN.1 rules.
+ * It will accept a negative length and place a zero length instead (to
+ * keep the ASN.1 valid). This convention allows other encoder primitives
+ * to accept negative lengths as signalling that the sequence will be
+ * re-encoded when the length is known.
+ */
+static int asn1_encode_length(unsigned char **data, int *data_len, int len)
+{
+ if (*data_len < 1)
+ return -EINVAL;
+
+ if (len < 0) {
+ *((*data)++) = 0;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (len <= 0x7f) {
+ *((*data)++) = len;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (*data_len < 2)
+ return -EINVAL;
+
+ if (len <= 0xff) {
+ *((*data)++) = 0x81;
+ *((*data)++) = len & 0xff;
+ *data_len -= 2;
+ return 0;
+ }
+
+ if (*data_len < 3)
+ return -EINVAL;
+
+ if (len <= 0xffff) {
+ *((*data)++) = 0x82;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 3;
+ return 0;
+ }
+
+ if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff"))
+ return -EINVAL;
+
+ if (*data_len < 4)
+ return -EINVAL;
+ *((*data)++) = 0x83;
+ *((*data)++) = (len >> 16) & 0xff;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 4;
+
+ return 0;
+}
+
+/**
+ * asn1_encode_tag() - add a tag for optional or explicit value
+ * @data: pointer to place tag at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @tag: tag to be placed
+ * @string: the data to be tagged
+ * @len: the length of the data to be tagged
+ *
+ * Note this currently only handles short form tags < 31.
+ *
+ * Standard usage is to pass in a @tag, @string and @length and the
+ * @string will be ASN.1 encoded with @tag and placed into @data. If
+ * the encoding would put data past @end_data then an error is
+ * returned, otherwise a pointer to a position one beyond the encoding
+ * is returned.
+ *
+ * To encode in place, pass a NULL @string and -1 for @len along with the
+ * maximum allowable beginning and end of the data; all this will do is
+ * add the current maximum length and return an updated data pointer to
+ * the place where the tag contents should be placed.  The data should be
+ * copied in by the calling routine, which should then repeat the prior
+ * call but now with the known length.  In order to avoid having to keep
+ * both before and after pointers, the repeat expects to be called with
+ * @data pointing to where the first encode returned it and still NULL
+ * for @string but the real length in @len.
+ */
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (WARN(tag > 30, "ASN.1 tag can't be > 30"))
+ return ERR_PTR(-EINVAL);
+
+ if (!string && WARN(len > 127,
+ "BUG: recode tag is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!string && len > 0) {
+		/*
+		 * we're recoding, so move back to the start of the
+		 * tag and overwrite the dummy length now that the
+		 * real length is known
+		 */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tagn(CONT, CONS, tag);
+ data_len--;
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!string)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_tag);
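/*
 * Illustrative sketch of the one-shot form of asn1_encode_tag(): wrap an
 * already-encoded @payload in context tag [1].  Parameter names are
 * assumptions for the example; see the kernel-doc above for the two-pass
 * (@string == NULL) recoding form.
 */
static unsigned char *example_tag_payload(unsigned char *buf,
					  const unsigned char *end,
					  const unsigned char *payload,
					  int plen)
{
	/* emits the constructed context-1 tag, the length, then the payload */
	return asn1_encode_tag(buf, end, 1, payload, plen);
}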
+
+/**
+ * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @string: string to be encoded
+ * @len: length of string
+ *
+ * Note ASN.1 octet strings may contain zeros, so the length is obligatory.
+ */
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need minimum of 2 bytes for tag and length of zero length string */
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, OTS);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_octet_string);
+
+/**
+ * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @seq: data to be encoded as a sequence
+ * @len: length of the data to be encoded as a sequence
+ *
+ * Fill in a sequence. To encode in place, pass NULL for @seq and -1
+ * for @len; then call again once the length is known (still with NULL
+ * for @seq). In order to avoid having to keep both before and after
+ * pointers, the repeat expects to be called with @data pointing to
+ * where the first encode placed it.
+ */
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (!seq && WARN(len > 127,
+ "BUG: recode sequence is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!seq && len >= 0) {
+		/*
+		 * we're recoding, so move back to the start of the
+		 * sequence and overwrite the dummy length now that
+		 * the real length is known
+		 */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, CONS, SEQ);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!seq)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, seq, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_sequence);
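/*
 * Illustrative sketch of the two-pass convention described above: reserve
 * the SEQUENCE header, encode the members, then repeat the call with the
 * now-known length (the recode form only handles short-form lengths of up
 * to 127 bytes, per the WARN above).  Because every encoder passes an
 * ERR_PTR() input straight through, the error check can be deferred to the
 * last call in the chain.  Buffer size and contents are example assumptions.
 */
static int example_encode_sequence(unsigned char *buf, int buflen, s64 value)
{
	const unsigned char *end = buf + buflen;
	unsigned char *seq, *p;

	/* first pass: NULL @seq and -1 @len just reserve tag + dummy length */
	seq = asn1_encode_sequence(buf, end, NULL, -1);

	p = asn1_encode_integer(seq, end, value);
	p = asn1_encode_boolean(p, end, true);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* second pass overwrites the dummy length with the real one */
	seq = asn1_encode_sequence(seq, end, NULL, p - seq);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	return p - buf;		/* total bytes consumed in @buf */
}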
+
+/**
+ * asn1_encode_boolean() - encode a boolean value to ASN.1
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @val: the boolean true/false value
+ */
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val)
+{
+ int data_len = end_data - data;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, BOOL);
+ data_len--;
+
+ asn1_encode_length(&data, &data_len, 1);
+
+ if (val)
+ *(data++) = 1;
+ else
+ *(data++) = 0;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_boolean);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 6f4bcf524554..ca0b4f360c1a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -741,8 +741,7 @@ all_leaves_cluster_together:
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
- keylen * sizeof(unsigned long), GFP_KERNEL);
+ new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
@@ -849,8 +848,8 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
- keylen * sizeof(unsigned long), GFP_KERNEL);
+ new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
+ GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
@@ -864,7 +863,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
new_n0->parent_slot = 0;
memcpy(new_s0->index_key, shortcut->index_key,
- keylen * sizeof(unsigned long));
+ flex_array_size(new_s0, index_key, keylen));
blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
@@ -899,8 +898,8 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
- keylen * sizeof(unsigned long), GFP_KERNEL);
+ new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
+ GFP_KERNEL);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
@@ -913,7 +912,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
memcpy(new_s1->index_key, shortcut->index_key,
- keylen * sizeof(unsigned long));
+ flex_array_size(new_s1, index_key, keylen));
edit->set[1].ptr = &side->back_pointer;
edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
@@ -1113,7 +1112,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
index_key))
goto found_leaf;
}
- /* fall through */
+ fallthrough;
case assoc_array_walk_tree_empty:
case assoc_array_walk_found_wrong_shortcut:
default:
@@ -1462,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
struct assoc_array_ptr *cursor, *ptr;
struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
unsigned long nr_leaves_on_tree;
+ bool retained;
int keylen, slot, nr_free, next_slot, i;
pr_devel("-->%s()\n", __func__);
@@ -1490,13 +1490,12 @@ descend:
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
- keylen * sizeof(unsigned long), GFP_KERNEL);
+ new_s = kmalloc(struct_size(new_s, index_key, keylen),
+ GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
- memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
- keylen * sizeof(unsigned long)));
+ memcpy(new_s, shortcut, struct_size(new_s, index_key, keylen));
new_s->back_pointer = new_parent;
new_s->parent_slot = shortcut->parent_slot;
*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
@@ -1538,6 +1537,7 @@ continue_node:
goto descend;
}
+retry_compress:
pr_devel("-- compress node %p --\n", new_n);
/* Count up the number of empty slots in this node and work out the
@@ -1555,6 +1555,7 @@ continue_node:
pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
/* See what we can fold in */
+ retained = false;
next_slot = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_shortcut *s;
@@ -1604,9 +1605,14 @@ continue_node:
pr_devel("[%d] retain node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
+ retained = true;
}
}
+ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ pr_devel("internal nodes remain despite enough space, retrying\n");
+ goto retry_compress;
+ }
pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
nr_leaves_on_tree = new_n->nr_leaves_on_branch;
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 7e6905751522..caf895789a1e 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,20 +42,20 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-long long atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
-void atomic64_set(atomic64_t *v, long long i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, long long i)
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(long long a, atomic64_t *v) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,28 +76,28 @@ void atomic64_##op(long long a, atomic64_t *v) \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-long long atomic64_##op##_return(long long a, atomic64_t *v) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
- long long val; \
+ s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = (v->counter c_op a); \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
- long long val; \
+ s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = v->counter; \
@@ -105,7 +105,7 @@ long long atomic64_fetch_##op(long long a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
- ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=)
@@ -127,14 +126,13 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
@@ -143,13 +141,13 @@ long long atomic64_dec_if_positive(atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -158,13 +156,13 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -172,13 +170,13 @@ long long atomic64_xchg(atomic64_t *v, long long new)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -188,4 +186,4 @@ long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
return val;
}
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
diff --git a/lib/audit.c b/lib/audit.c
index 5004bff928a7..738bda22dd39 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -45,23 +45,27 @@ int audit_classify_syscall(int abi, unsigned syscall)
switch(syscall) {
#ifdef __NR_open
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
#endif
#ifdef __NR_execveat
case __NR_execveat:
#endif
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+#ifdef __NR_openat2
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
+#endif
default:
- return 0;
+ return AUDITSC_NATIVE;
}
}
diff --git a/lib/base64.c b/lib/base64.c
new file mode 100644
index 000000000000..b736a7a431c5
--- /dev/null
+++ b/lib/base64.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64.c - RFC4648-compliant base64 encoding
+ *
+ * Copyright (c) 2020 Hannes Reinecke, SUSE
+ *
+ * Based on the base64url routines from fs/crypto/fname.c
+ * (which are using the URL-safe base64 encoding),
+ * modified to use the standard coding table from RFC4648 section 4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/base64.h>
+
+static const char base64_table[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+/**
+ * base64_encode() - base64-encode some binary data
+ * @src: the binary data to encode
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the base64-encoded string. Not NUL-terminated.
+ *
+ * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified
+ * by RFC 4648, including the '='-padding.
+ *
+ * Return: the length of the resulting base64-encoded string in bytes.
+ */
+int base64_encode(const u8 *src, int srclen, char *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ char *cp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ ac = (ac << 8) | src[i];
+ bits += 8;
+ do {
+ bits -= 6;
+ *cp++ = base64_table[(ac >> bits) & 0x3f];
+ } while (bits >= 6);
+ }
+ if (bits) {
+ *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
+ bits -= 6;
+ }
+ while (bits < 0) {
+ *cp++ = '=';
+ bits += 2;
+ }
+ return cp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_encode);
+
+/**
+ * base64_decode() - base64-decode a string
+ * @src: the string to decode. Doesn't need to be NUL-terminated.
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the decoded binary data
+ *
+ * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding"
+ * specified by RFC 4648, including the '='-padding.
+ *
+ * This implementation hasn't been optimized for performance.
+ *
+ * Return: the length of the resulting decoded binary data in bytes,
+ * or -1 if the string isn't a valid base64 string.
+ */
+int base64_decode(const char *src, int srclen, u8 *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ u8 *bp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ const char *p = strchr(base64_table, src[i]);
+
+ if (src[i] == '=') {
+ ac = (ac << 6);
+ bits += 6;
+ if (bits >= 8)
+ bits -= 8;
+ continue;
+ }
+ if (p == NULL || src[i] == 0)
+ return -1;
+ ac = (ac << 6) | (p - base64_table);
+ bits += 6;
+ if (bits >= 8) {
+ bits -= 8;
+ *bp++ = (u8)(ac >> bits);
+ }
+ }
+ if (ac & ((1 << bits) - 1))
+ return -1;
+ return bp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_decode);
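/*
 * Illustrative round-trip sketch for the two helpers above.  The input
 * bytes and buffer sizes are assumptions; 4 input bytes expand to 8
 * base64 characters including the '=' padding.
 */
static int example_base64_roundtrip(void)
{
	static const u8 in[] = { 0xde, 0xad, 0xbe, 0xef };
	char enc[8];
	u8 dec[4];
	int elen, dlen;

	elen = base64_encode(in, sizeof(in), enc);	/* elen == 8, no NUL */
	dlen = base64_decode(enc, elen, dec);		/* dlen == 4 */
	if (dlen != sizeof(in))
		return -EINVAL;

	return memcmp(in, dec, dlen) ? -EINVAL : 0;
}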
diff --git a/lib/bch.c b/lib/bch.c
index 5db6d3a4c8a6..5f71fd76eca8 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -23,15 +23,15 @@
* This library provides runtime configurable encoding/decoding of binary
* Bose-Chaudhuri-Hocquenghem (BCH) codes.
*
- * Call init_bch to get a pointer to a newly allocated bch_control structure for
+ * Call bch_init to get a pointer to a newly allocated bch_control structure for
* the given m (Galois field order), t (error correction capability) and
* (optional) primitive polynomial parameters.
*
- * Call encode_bch to compute and store ecc parity bytes to a given buffer.
- * Call decode_bch to detect and locate errors in received data.
+ * Call bch_encode to compute and store ecc parity bytes to a given buffer.
+ * Call bch_decode to detect and locate errors in received data.
*
* On systems supporting hw BCH features, intermediate results may be provided
- * to decode_bch in order to skip certain steps. See decode_bch() documentation
+ * to bch_decode in order to skip certain steps. See bch_decode() documentation
* for details.
*
* Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of
@@ -71,6 +71,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/bitrev.h>
#include <asm/byteorder.h>
#include <linux/bch.h>
@@ -102,7 +103,7 @@
*/
struct gf_poly {
unsigned int deg; /* polynomial degree */
- unsigned int c[0]; /* polynomial terms */
+ unsigned int c[]; /* polynomial terms */
};
/* given its degree, compute a polynomial size in bytes */
@@ -114,10 +115,18 @@ struct gf_poly_deg1 {
unsigned int c[2];
};
+static u8 swap_bits(struct bch_control *bch, u8 in)
+{
+ if (!bch->swap_bits)
+ return in;
+
+ return bitrev8(in);
+}
+
/*
- * same as encode_bch(), but process input data one byte at a time
+ * same as bch_encode(), but process input data one byte at a time
*/
-static void encode_bch_unaligned(struct bch_control *bch,
+static void bch_encode_unaligned(struct bch_control *bch,
const unsigned char *data, unsigned int len,
uint32_t *ecc)
{
@@ -126,7 +135,9 @@ static void encode_bch_unaligned(struct bch_control *bch,
const int l = BCH_ECC_WORDS(bch)-1;
while (len--) {
- p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff);
+ u8 tmp = swap_bits(bch, *data++);
+
+ p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff);
for (i = 0; i < l; i++)
ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
@@ -145,10 +156,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++, src += 4)
- dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3];
+ dst[i] = ((u32)swap_bits(bch, src[0]) << 24) |
+ ((u32)swap_bits(bch, src[1]) << 16) |
+ ((u32)swap_bits(bch, src[2]) << 8) |
+ swap_bits(bch, src[3]);
memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords);
- dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3];
+ dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) |
+ ((u32)swap_bits(bch, pad[1]) << 16) |
+ ((u32)swap_bits(bch, pad[2]) << 8) |
+ swap_bits(bch, pad[3]);
}
/*
@@ -161,20 +178,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++) {
- *dst++ = (src[i] >> 24);
- *dst++ = (src[i] >> 16) & 0xff;
- *dst++ = (src[i] >> 8) & 0xff;
- *dst++ = (src[i] >> 0) & 0xff;
+ *dst++ = swap_bits(bch, src[i] >> 24);
+ *dst++ = swap_bits(bch, src[i] >> 16);
+ *dst++ = swap_bits(bch, src[i] >> 8);
+ *dst++ = swap_bits(bch, src[i]);
}
- pad[0] = (src[nwords] >> 24);
- pad[1] = (src[nwords] >> 16) & 0xff;
- pad[2] = (src[nwords] >> 8) & 0xff;
- pad[3] = (src[nwords] >> 0) & 0xff;
+ pad[0] = swap_bits(bch, src[nwords] >> 24);
+ pad[1] = swap_bits(bch, src[nwords] >> 16);
+ pad[2] = swap_bits(bch, src[nwords] >> 8);
+ pad[3] = swap_bits(bch, src[nwords]);
memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords);
}
/**
- * encode_bch - calculate BCH ecc parity of data
+ * bch_encode - calculate BCH ecc parity of data
* @bch: BCH control structure
* @data: data to encode
* @len: data length in bytes
@@ -187,7 +204,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
* The exact number of computed ecc parity bits is given by member @ecc_bits of
* @bch; it may be less than m*t for large values of t.
*/
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc)
{
const unsigned int l = BCH_ECC_WORDS(bch)-1;
@@ -215,7 +232,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
m = ((unsigned long)data) & 3;
if (m) {
mlen = (len < (4-m)) ? len : 4-m;
- encode_bch_unaligned(bch, data, mlen, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, mlen, bch->ecc_buf);
data += mlen;
len -= mlen;
}
@@ -240,7 +257,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
*/
while (mlen--) {
/* input data is read in big-endian format */
- w = r[0]^cpu_to_be32(*pdata++);
+ w = cpu_to_be32(*pdata++);
+ if (bch->swap_bits)
+ w = (u32)swap_bits(bch, w) |
+ ((u32)swap_bits(bch, w >> 8) << 8) |
+ ((u32)swap_bits(bch, w >> 16) << 16) |
+ ((u32)swap_bits(bch, w >> 24) << 24);
+ w ^= r[0];
p0 = tab0 + (l+1)*((w >> 0) & 0xff);
p1 = tab1 + (l+1)*((w >> 8) & 0xff);
p2 = tab2 + (l+1)*((w >> 16) & 0xff);
@@ -255,13 +278,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
/* process last unaligned bytes */
if (len)
- encode_bch_unaligned(bch, data, len, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, len, bch->ecc_buf);
/* store ecc parity bytes into original parity buffer */
if (ecc)
store_ecc8(bch, ecc, bch->ecc_buf);
}
-EXPORT_SYMBOL_GPL(encode_bch);
+EXPORT_SYMBOL_GPL(bch_encode);
static inline int modulo(struct bch_control *bch, unsigned int v)
{
@@ -527,7 +550,7 @@ static int find_affine4_roots(struct bch_control *bch, unsigned int a,
k = a_log(bch, a);
rows[0] = c;
- /* buid linear system to solve X^4+aX^2+bX+c = 0 */
+ /* build linear system to solve X^4+aX^2+bX+c = 0 */
for (i = 0; i < m; i++) {
rows[i+1] = bch->a_pow_tab[4*i]^
(a ? bch->a_pow_tab[mod_s(bch, k)] : 0)^
@@ -952,7 +975,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
#endif /* USE_CHIEN_SEARCH */
/**
- * decode_bch - decode received codeword and find bit error locations
+ * bch_decode - decode received codeword and find bit error locations
* @bch: BCH control structure
* @data: received data, ignored if @calc_ecc is provided
* @len: data length in bytes, must always be provided
@@ -966,22 +989,22 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* invalid parameters were provided
*
* Depending on the available hw BCH support and the need to compute @calc_ecc
- * separately (using encode_bch()), this function should be called with one of
+ * separately (using bch_encode()), this function should be called with one of
* the following parameter configurations -
*
* by providing @data and @recv_ecc only:
- * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
+ * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
*
* by providing @recv_ecc and @calc_ecc:
- * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
*
* by providing ecc = recv_ecc XOR calc_ecc:
- * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
*
* by providing syndrome results @syn:
- * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
*
- * Once decode_bch() has successfully returned with a positive value, error
+ * Once bch_decode() has successfully returned with a positive value, error
* locations returned in array @errloc should be interpreted as follows -
*
* if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for
@@ -993,7 +1016,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* Note that this function does not perform any data correction by itself, it
* merely indicates error locations.
*/
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc)
{
@@ -1012,7 +1035,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
/* compute received data ecc into an internal buffer */
if (!data || !recv_ecc)
return -EINVAL;
- encode_bch(bch, data, len, NULL);
+ bch_encode(bch, data, len, NULL);
} else {
/* load provided calculated ecc */
load_ecc8(bch, bch->ecc_buf, calc_ecc);
@@ -1048,12 +1071,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
break;
}
errloc[i] = nbits-1-errloc[i];
- errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7));
+ if (!bch->swap_bits)
+ errloc[i] = (errloc[i] & ~7) |
+ (7-(errloc[i] & 7));
}
}
return (err >= 0) ? err : -EBADMSG;
}
-EXPORT_SYMBOL_GPL(decode_bch);
+EXPORT_SYMBOL_GPL(bch_decode);
/*
* generate Galois field lookup tables
@@ -1236,27 +1261,29 @@ finish:
}
/**
- * init_bch - initialize a BCH encoder/decoder
+ * bch_init - initialize a BCH encoder/decoder
* @m: Galois field order, should be in the range 5-15
* @t: maximum error correction capability, in bits
* @prim_poly: user-provided primitive polynomial (or 0 to use default)
+ * @swap_bits: swap bits within data and syndrome bytes
*
* Returns:
* a newly allocated BCH control structure if successful, NULL otherwise
*
* This initialization can take some time, as lookup tables are built for fast
* encoding/decoding; make sure not to call this function from a time critical
- * path. Usually, init_bch() should be called on module/driver init and
- * free_bch() should be called to release memory on exit.
+ * path. Usually, bch_init() should be called on module/driver init and
+ * bch_free() should be called to release memory on exit.
*
* You may provide your own primitive polynomial of degree @m in argument
- * @prim_poly, or let init_bch() use its default polynomial.
+ * @prim_poly, or let bch_init() use its default polynomial.
*
- * Once init_bch() has successfully returned a pointer to a newly allocated
+ * Once bch_init() has successfully returned a pointer to a newly allocated
* BCH control structure, ecc length in bytes is given by member @ecc_bytes of
* the structure.
*/
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits)
{
int err = 0;
unsigned int i, words;
@@ -1321,6 +1348,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err);
bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
+ bch->swap_bits = swap_bits;
for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
@@ -1347,16 +1375,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
return bch;
fail:
- free_bch(bch);
+ bch_free(bch);
return NULL;
}
-EXPORT_SYMBOL_GPL(init_bch);
+EXPORT_SYMBOL_GPL(bch_init);
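/*
 * Illustrative lifecycle sketch for the renamed API: GF(2^13), t = 4,
 * default primitive polynomial, no bit swapping.  The parameter values and
 * the caller-side bit-flip correction step are assumptions for the example;
 * bch_decode() itself only reports error locations.
 */
static int example_bch_correct(u8 *data, unsigned int len, u8 *recv_ecc)
{
	struct bch_control *bch;
	unsigned int errloc[4];
	int i, nerr;

	bch = bch_init(13, 4, 0, false);
	if (!bch)
		return -EINVAL;

	/* recompute the ecc internally and compare against recv_ecc */
	nerr = bch_decode(bch, data, len, recv_ecc, NULL, NULL, errloc);

	for (i = 0; i < nerr; i++) {
		/* flip in-data errors; locations >= 8*len fall in the ecc */
		if (errloc[i] < 8 * len)
			data[errloc[i] / 8] ^= 1 << (errloc[i] % 8);
	}

	bch_free(bch);
	return nerr;	/* number of bit errors found, or -EBADMSG/-EINVAL */
}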
/**
- * free_bch - free the BCH control structure
+ * bch_free - free the BCH control structure
* @bch: BCH control structure to release
*/
-void free_bch(struct bch_control *bch)
+void bch_free(struct bch_control *bch)
{
unsigned int i;
@@ -1377,7 +1405,7 @@ void free_bch(struct bch_control *bch)
kfree(bch);
}
}
-EXPORT_SYMBOL_GPL(free_bch);
+EXPORT_SYMBOL_GPL(bch_free);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
diff --git a/lib/test_bitfield.c b/lib/bitfield_kunit.c
index 5b8f4108662d..1473d8b4bf0f 100644
--- a/lib/test_bitfield.c
+++ b/lib/bitfield_kunit.c
@@ -5,8 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <kunit/test.h>
#include <linux/bitfield.h>
#define CHECK_ENC_GET_U(tp, v, field, res) do { \
@@ -14,13 +13,11 @@
u##tp _res; \
\
_res = u##tp##_encode_bits(v, field); \
- if (_res != res) { \
- pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
- (u64)_res); \
- return -EINVAL; \
- } \
- if (u##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
+ "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
+ (u64)_res); \
+ KUNIT_ASSERT_FALSE(context, \
+ u##tp##_get_bits(_res, field) != v); \
} \
} while (0)
@@ -29,14 +26,13 @@
__le##tp _res; \
\
_res = le##tp##_encode_bits(v, field); \
- if (_res != cpu_to_le##tp(res)) { \
- pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)le##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (le##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_le##tp(res), \
+ "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ le##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -45,14 +41,13 @@
__be##tp _res; \
\
_res = be##tp##_encode_bits(v, field); \
- if (_res != cpu_to_be##tp(res)) { \
- pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)be##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (be##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_be##tp(res), \
+ "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ be##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -62,7 +57,7 @@
CHECK_ENC_GET_BE(tp, v, field, res); \
} while (0)
-static int test_constants(void)
+static void __init test_bitfields_constants(struct kunit *context)
{
/*
* NOTE
@@ -95,19 +90,17 @@ static int test_constants(void)
CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
-
- return 0;
}
#define CHECK(tp, mask) do { \
u64 v; \
\
for (v = 0; v < 1 << hweight32(mask); v++) \
- if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE(context, \
+ tp##_encode_bits(v, mask) != v << __ffs64(mask));\
} while (0)
-static int test_variables(void)
+static void __init test_bitfields_variables(struct kunit *context)
{
CHECK(u8, 0x0f);
CHECK(u8, 0xf0);
@@ -130,39 +123,32 @@ static int test_variables(void)
CHECK(u64, 0x000000007f000000ull);
CHECK(u64, 0x0000000018000000ull);
CHECK(u64, 0x0000001f8000000ull);
-
- return 0;
}
-static int __init test_bitfields(void)
-{
- int ret = test_constants();
-
- if (ret) {
- pr_warn("constant tests failed!\n");
- return ret;
- }
-
- ret = test_variables();
- if (ret) {
- pr_warn("variable tests failed!\n");
- return ret;
- }
-
#ifdef TEST_BITFIELD_COMPILE
+static void __init test_bitfields_compile(struct kunit *context)
+{
/* these should fail compilation */
CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
u32_encode_bits(7, 0x06000000);
/* this should at least give a warning */
u16_encode_bits(0, 0x60000);
+}
#endif
- pr_info("tests passed\n");
+static struct kunit_case __refdata bitfields_test_cases[] = {
+ KUNIT_CASE(test_bitfields_constants),
+ KUNIT_CASE(test_bitfields_variables),
+ {}
+};
- return 0;
-}
-module_init(test_bitfields)
+static struct kunit_suite bitfields_test_suite = {
+ .name = "bitfields",
+ .test_cases = bitfields_test_cases,
+};
+
+kunit_test_suites(&bitfields_test_suite);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
diff --git a/lib/bitmap-str.c b/lib/bitmap-str.c
new file mode 100644
index 000000000000..be745209507a
--- /dev/null
+++ b/lib/bitmap-str.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitmap.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "kstrtox.h"
+
+/**
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parse_user);
+
+/**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be printed as a list
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified or hex digits grouped into comma-separated
+ * sets of 8 digits/set.
+ *
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits)
+{
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
+
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
+
+/**
+ * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be printed as a list
+ * true: print in decimal list format
+ * false: print in hexadecimal bitmask format
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off:	offset in the generated string from which we start copying to @buf
+ * @count: the maximum number of bytes to print
+ */
+static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ const char *fmt = list ? "%*pbl\n" : "%*pb\n";
+ ssize_t size;
+ void *data;
+
+ data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
+ if (!data)
+ return -ENOMEM;
+
+ size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
+ kfree(data);
+
+ return size;
+}
+
+/**
+ * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off:	offset in the generated string from which we start copying to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
+ * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
+ * bitmask and decimal list to userspace by sysfs ABI.
+ * Drivers might be using a normal attribute for this kind of ABIs. A
+ * normal attribute typically has show entry as below::
+ *
+ * static ssize_t example_attribute_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * ...
+ * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ * }
+ *
+ * The show entry of a normal attribute has no offset and count parameters,
+ * which means the file is limited to one page only.
+ * The bitmap_print_to_pagebuf() API works well for this kind of normal
+ * attribute, with a buf parameter and without offset and count::
+ *
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * int nmaskbits)
+ * {
+ * }
+ *
+ * The problem is that once we have a large bitmap, the resulting bitmask
+ * or list may take more than one page. Especially for a list, it could be
+ * as complex as 0,3,5,7,9,... and we have no simple way to know its exact
+ * size in advance. It turns out bin_attribute is a way to break this limit;
+ * a bin_attribute has a show entry as below::
+ *
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * struct bin_attribute *attr, char *buf,
+ * loff_t offset, size_t count)
+ * {
+ * ...
+ * }
+ *
+ * With the new offset and count parameters, the sysfs ABI is able to
+ * support file sizes of more than one page. For example, offset could be
+ * >= 4096.
+ * bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), with their
+ * cpumap wrappers cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(),
+ * make those drivers able to support a large bitmask and list after they
+ * move to use bin_attribute. As a result, we have to pass the corresponding
+ * parameters such as off, count from the bin_attribute show entry to this API.
+ *
+ * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
+ * is similar to cpumap_print_to_pagebuf(); the difference is that
+ * bitmap_print_to_pagebuf() mainly serves sysfs attributes, with the
+ * assumption that the destination buffer is exactly one page and no more.
+ * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
+ * hand, mainly serve bin_attribute, which is not tied to exactly one page
+ * and can break the size limit of the converted decimal list and hexadecimal
+ * bitmask.
+ *
+ * WARNING!
+ *
+ * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
+ * It is intended to workaround sysfs limitations discussed above and should be
+ * used carefully in general case for the following reasons:
+ *
+ * - Time complexity is O(nbits^2/count), compared to O(nbits) for snprintf().
+ * - Memory complexity is O(nbits), compared to O(1) for snprintf().
+ * - @off and @count are NOT offset and number of bits to print.
+ * - If printing part of bitmap as list, the resulting string is not a correct
+ * list representation of bitmap. Particularly, some bits within or out of
+ * related interval may be erroneously set or unset. The format of the string
+ * may be broken, so bitmap_parselist-like parser may fail parsing it.
+ * - If printing the whole bitmap as list by parts, user must ensure the order
+ * of calls of the function such that the offset is incremented linearly.
+ * - If printing the whole bitmap as list by parts, user must keep bitmap
+ * unchanged between the very first and very last call. Otherwise concatenated
+ * result may be incorrect, and format may be broken.
+ *
+ * Returns the number of characters actually printed to @buf
+ */
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
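/*
 * Illustrative sketch of the bin_attribute-style usage described above: the
 * sysfs read window (@off/@count) is forwarded straight to the helper, which
 * re-renders the whole mask internally and copies out only the requested
 * slice.  example_mask and its width stand in for driver state; the sysfs
 * registration around it is omitted.
 */
static DECLARE_BITMAP(example_mask, 2048);

static ssize_t example_mask_bin_read(char *buf, loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, example_mask, 2048, off, count);
}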
+
+/**
+ * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off:	offset in the generated string from which we start copying to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is same with the above bitmap_print_bitmask_to_buf() except
+ * the print format.
+ */
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_list_to_buf);
+
+/*
+ * Region 9-38:4/10 describes the following bitmap structure:
+ * 0 9 12 18 38 N
+ * .........****......****......****..................
+ * ^ ^ ^ ^ ^
+ * start off group_len end nbits
+ */
+struct region {
+ unsigned int start;
+ unsigned int off;
+ unsigned int group_len;
+ unsigned int end;
+ unsigned int nbits;
+};
+
+static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
+{
+ unsigned int start;
+
+ for (start = r->start; start <= r->end; start += r->group_len)
+ bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
+}
+
+static int bitmap_check_region(const struct region *r)
+{
+ if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
+ return -EINVAL;
+
+ if (r->end >= r->nbits)
+ return -ERANGE;
+
+ return 0;
+}
+
+static const char *bitmap_getnum(const char *str, unsigned int *num,
+ unsigned int lastbit)
+{
+ unsigned long long n;
+ unsigned int len;
+
+ if (str[0] == 'N') {
+ *num = lastbit;
+ return str + 1;
+ }
+
+ len = _parse_integer(str, 10, &n);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
+ return ERR_PTR(-EOVERFLOW);
+
+ *num = n;
+ return str + len;
+}
+
+static inline bool end_of_str(char c)
+{
+ return c == '\0' || c == '\n';
+}
+
+static inline bool __end_of_region(char c)
+{
+ return isspace(c) || c == ',';
+}
+
+static inline bool end_of_region(char c)
+{
+ return __end_of_region(c) || end_of_str(c);
+}
+
+/*
+ * The format allows commas and whitespaces at the beginning
+ * of the region.
+ */
+static const char *bitmap_find_region(const char *str)
+{
+ while (__end_of_region(*str))
+ str++;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+ while (start <= end && __end_of_region(*end))
+ end--;
+
+ return end;
+}
+
+static const char *bitmap_parse_region(const char *str, struct region *r)
+{
+ unsigned int lastbit = r->nbits - 1;
+
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
+ str = bitmap_getnum(str, &r->start, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (end_of_region(*str))
+ goto no_end;
+
+ if (*str != '-')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->end, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+check_pattern:
+ if (end_of_region(*str))
+ goto no_pattern;
+
+ if (*str != ':')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->off, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (*str != '/')
+ return ERR_PTR(-EINVAL);
+
+ return bitmap_getnum(str + 1, &r->group_len, lastbit);
+
+no_end:
+ r->end = r->start;
+no_pattern:
+ r->off = r->end + 1;
+ r->group_len = r->end + 1;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read user string from this buffer; must be terminated
+ * with a \0 or \n.
+ * @maskp: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ * Optionally each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size.
+ * Only the defined number of bits will be used from each group.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
+ * The value 'N' can be used as a dynamically substituted token for the
+ * maximum allowed value; i.e. (nmaskbits - 1). Keep in mind that it is
+ * dynamic, so if system changes cause the bitmap width to change, such
+ * as more cores in a CPU list, then any ranges using N will also change.
+ *
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: wrong region format
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
+ * - ``-EOVERFLOW``: integer overflow in the input parameters
+ */
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
+{
+ struct region r;
+ long ret;
+
+ r.nbits = nmaskbits;
+ bitmap_zero(maskp, r.nbits);
+
+ while (buf) {
+ buf = bitmap_find_region(buf);
+ if (buf == NULL)
+ return 0;
+
+ buf = bitmap_parse_region(buf, &r);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_check_region(&r);
+ if (ret)
+ return ret;
+
+ bitmap_set_region(&r, maskp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
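/*
 * Illustrative sketch for bitmap_parselist(), reusing the "0-1023:2/256"
 * example from the kernel-doc above.  The bitmap width is an assumption
 * for the example.
 */
static int example_parselist(void)
{
	DECLARE_BITMAP(mask, 1024);
	int ret;

	ret = bitmap_parselist("0-1023:2/256", mask, 1024);
	if (ret)
		return ret;

	/* bits 0,1,256,257,512,513,768,769 are now set */
	return bitmap_weight(mask, 1024) == 8 ? 0 : -EINVAL;
}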
+
+
+/**
+ * bitmap_parselist_user() - convert user buffer's list format ASCII
+ * string to bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for bitmap_parselist(), providing it with user buffer.
+ */
+int bitmap_parselist_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parselist(buf, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parselist_user);
+
+static const char *bitmap_get_x32_reverse(const char *start,
+ const char *end, u32 *num)
+{
+ u32 ret = 0;
+ int c, i;
+
+ for (i = 0; i < 32; i += 4) {
+ c = hex_to_bin(*end--);
+ if (c < 0)
+ return ERR_PTR(-EINVAL);
+
+ ret |= c << i;
+
+ if (start > end || __end_of_region(*end))
+ goto out;
+ }
+
+ if (hex_to_bin(*end--) >= 0)
+ return ERR_PTR(-EOVERFLOW);
+out:
+ *num = ret;
+ return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0 or \n. In that case,
+ * UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks. Each chunk defines exactly 32
+ * bits of the resultant bitmask. No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ const char *end = strnchrnul(start, buflen, '\n') - 1;
+ int chunks = BITS_TO_U32(nmaskbits);
+ u32 *bitmap = (u32 *)maskp;
+ int unset_bit;
+ int chunk;
+
+ for (chunk = 0; ; chunk++) {
+ end = bitmap_find_region_reverse(start, end);
+ if (start > end)
+ break;
+
+ if (!chunks--)
+ return -EOVERFLOW;
+
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
+ if (IS_ERR(end))
+ return PTR_ERR(end);
+ }
+
+ unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+ if (unset_bit < nmaskbits) {
+ bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+ return 0;
+ }
+
+ if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+ return -EOVERFLOW;
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
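A hedged sketch of the hex chunk format described above; the 64-bit mask and the input string are illustrative, and UINT_MAX is passed as the length because the string is NUL-terminated.

#include <linux/bitmap.h>
#include <linux/kernel.h>

static int example_parse_hex(void)
{
	DECLARE_BITMAP(mask, 64);

	/* Two 32-bit chunks: "ff" lands in bits 32-39, "00000001" sets bit 0. */
	return bitmap_parse("ff,00000001", UINT_MAX, mask, 64);
}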
diff --git a/lib/bitmap.c b/lib/bitmap.c
index bbe2589e8497..09522af227f1 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -3,27 +3,18 @@
* lib/bitmap.c
* Helper functions for bitmap.h.
*/
-#include <linux/export.h>
-#include <linux/thread_info.h>
-#include <linux/ctype.h>
-#include <linux/errno.h>
+
#include <linux/bitmap.h>
#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-#include <asm/page.h>
-
-#include "kstrtox.h"
/**
* DOC: bitmap introduction
*
- * bitmaps provide an array of bits, implemented using an an
+ * bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
@@ -43,22 +34,42 @@
* for the best explanations of this ordering.
*/
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
+ return false;
- return 1;
+ return true;
}
EXPORT_SYMBOL(__bitmap_equal);
+bool __bitmap_or_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2,
+ const unsigned long *bitmap3,
+ unsigned int bits)
+{
+ unsigned int k, lim = bits / BITS_PER_LONG;
+ unsigned long tmp;
+
+ for (k = 0; k < lim; ++k) {
+ if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
+ return false;
+ }
+
+ if (!(bits % BITS_PER_LONG))
+ return true;
+
+ tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
+ return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
+}
+
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
unsigned int k, lim = BITS_TO_LONGS(bits);
@@ -148,7 +159,74 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
}
EXPORT_SYMBOL(__bitmap_shift_left);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+/**
+ * bitmap_cut() - remove bit region from bitmap and right shift remaining bits
+ * @dst: destination bitmap, might overlap with src
+ * @src: source bitmap
+ * @first: start bit of region to be removed
+ * @cut: number of bits to remove
+ * @nbits: bitmap size, in bits
+ *
+ * Set the n-th bit of @dst iff the n-th bit of @src is set and
+ * n is less than @first, or the m-th bit of @src is set for any
+ * m such that @first <= n < nbits, and m = n + @cut.
+ *
+ * In pictures, example for a big-endian 32-bit architecture:
+ *
+ * The @src bitmap is::
+ *
+ * 31 63
+ * | |
+ * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
+ * | | | |
+ * 16 14 0 32
+ *
+ * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
+ *
+ * 31 63
+ * | |
+ * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
+ * | | |
+ * 14 (bit 17 0 32
+ * from @src)
+ *
+ * Note that @dst and @src might overlap partially or entirely.
+ *
+ * This is implemented in the obvious way, with a shift and carry
+ * step for each moved bit. Optimisation is left as an exercise
+ * for the compiler.
+ */
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits)
+{
+ unsigned int len = BITS_TO_LONGS(nbits);
+ unsigned long keep = 0, carry;
+ int i;
+
+ if (first % BITS_PER_LONG) {
+ keep = src[first / BITS_PER_LONG] &
+ (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
+ }
+
+ memmove(dst, src, len * sizeof(*dst));
+
+ while (cut--) {
+ for (i = first / BITS_PER_LONG; i < len; i++) {
+ if (i < len - 1)
+ carry = dst[i + 1] & 1UL;
+ else
+ carry = 0;
+
+ dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
+ }
+ }
+
+ dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
+ dst[first / BITS_PER_LONG] |= keep;
+}
+EXPORT_SYMBOL(bitmap_cut);
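A brief illustrative sketch (the helper name is hypothetical) of the in-place cut described above: removing four bits shifts the higher bits down.

static void example_cut(void)
{
	DECLARE_BITMAP(map, 16);

	bitmap_zero(map, 16);
	bitmap_set(map, 8, 4);			/* bits 8-11 set */
	bitmap_cut(map, map, 4, 4, 16);		/* cut bits 4-7: bits 8-11 move down to 4-7 */
}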
+
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
@@ -186,7 +264,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_xor);
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
@@ -202,51 +280,74 @@ int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_andnot);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(nbits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
+}
+EXPORT_SYMBOL(__bitmap_replace);
+
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
- return 1;
+ return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 1;
- return 0;
+ return true;
+ return false;
}
EXPORT_SYMBOL(__bitmap_intersects);
-int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
- return 1;
+ return false;
+ return true;
}
EXPORT_SYMBOL(__bitmap_subset);
-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
-{
- unsigned int k, lim = bits/BITS_PER_LONG;
- int w = 0;
-
- for (k = 0; k < lim; k++)
- w += hweight_long(bitmap[k]);
-
- if (bits % BITS_PER_LONG)
- w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+#define BITMAP_WEIGHT(FETCH, bits) \
+({ \
+ unsigned int __bits = (bits), idx, w = 0; \
+ \
+ for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
+ w += hweight_long(FETCH); \
+ \
+ if (__bits % BITS_PER_LONG) \
+ w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
+ \
+ w; \
+})
- return w;
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_and);
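A short sketch, assuming the bitmap_weight_and() wrapper declared in <linux/bitmap.h>: it counts bits set in both masks without materializing a temporary AND result.

static unsigned int count_common_bits(const unsigned long *a,
				      const unsigned long *b,
				      unsigned int nbits)
{
	/* Equivalent to bitmap_and() into scratch storage plus bitmap_weight(). */
	return bitmap_weight_and(a, b, nbits);
}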
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
@@ -328,358 +429,6 @@ again:
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
-/*
- * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
- * second version by Paul Jackson, third by Joe Korty.
- */
-
-#define CHUNKSZ 32
-#define nbits_to_hold_value(val) fls(val)
-#define BASEDEC 10 /* fancier cpuset lists input in decimal */
-
-/**
- * __bitmap_parse - convert an ASCII hex string into a bitmap.
- * @buf: pointer to buffer containing string.
- * @buflen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @is_user: location of buffer, 0 indicates kernel space
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Commas group hex digits into chunks. Each chunk defines exactly 32
- * bits of the resultant bitmask. No chunk may specify a value larger
- * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended. %-EINVAL is returned for illegal
- * characters and for grouping errors such as "1,,5", ",44", "," and "".
- * Leading and trailing whitespace accepted, but not embedded whitespace.
- */
-int __bitmap_parse(const char *buf, unsigned int buflen,
- int is_user, unsigned long *maskp,
- int nmaskbits)
-{
- int c, old_c, totaldigits, ndigits, nchunks, nbits;
- u32 chunk;
- const char __user __force *ubuf = (const char __user __force *)buf;
-
- bitmap_zero(maskp, nmaskbits);
-
- nchunks = nbits = totaldigits = c = 0;
- do {
- chunk = 0;
- ndigits = totaldigits;
-
- /* Get the next chunk of the bitmap */
- while (buflen) {
- old_c = c;
- if (is_user) {
- if (__get_user(c, ubuf++))
- return -EFAULT;
- }
- else
- c = *buf++;
- buflen--;
- if (isspace(c))
- continue;
-
- /*
- * If the last character was a space and the current
- * character isn't '\0', we've got embedded whitespace.
- * This is a no-no, so throw an error.
- */
- if (totaldigits && c && isspace(old_c))
- return -EINVAL;
-
- /* A '\0' or a ',' signal the end of the chunk */
- if (c == '\0' || c == ',')
- break;
-
- if (!isxdigit(c))
- return -EINVAL;
-
- /*
- * Make sure there are at least 4 free bits in 'chunk'.
- * If not, this hexdigit will overflow 'chunk', so
- * throw an error.
- */
- if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
- return -EOVERFLOW;
-
- chunk = (chunk << 4) | hex_to_bin(c);
- totaldigits++;
- }
- if (ndigits == totaldigits)
- return -EINVAL;
- if (nchunks == 0 && chunk == 0)
- continue;
-
- __bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
- *maskp |= chunk;
- nchunks++;
- nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
- if (nbits > nmaskbits)
- return -EOVERFLOW;
- } while (buflen && c == ',');
-
- return 0;
-}
-EXPORT_SYMBOL(__bitmap_parse);
-
-/**
- * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for __bitmap_parse(), providing it with user buffer.
- *
- * We cannot have this as an inline function in bitmap.h because it needs
- * linux/uaccess.h to get the access_ok() declaration and this causes
- * cyclic dependencies.
- */
-int bitmap_parse_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- if (!access_ok(ubuf, ulen))
- return -EFAULT;
- return __bitmap_parse((const char __force *)ubuf,
- ulen, 1, maskp, nmaskbits);
-
-}
-EXPORT_SYMBOL(bitmap_parse_user);
-
-/**
- * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
- * @list: indicates whether the bitmap must be list
- * @buf: page aligned buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- *
- * Output format is a comma-separated list of decimal numbers and
- * ranges if list is specified or hex digits grouped into comma-separated
- * sets of 8 digits/set. Returns the number of characters written to buf.
- *
- * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
- * area and that sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output. Returns the number of characters
- * actually printed to @buf, excluding terminating '\0'.
- */
-int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
- int nmaskbits)
-{
- ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
-
- return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
-}
-EXPORT_SYMBOL(bitmap_print_to_pagebuf);
-
-/*
- * Region 9-38:4/10 describes the following bitmap structure:
- * 0 9 12 18 38
- * .........****......****......****......
- * ^ ^ ^ ^
- * start off group_len end
- */
-struct region {
- unsigned int start;
- unsigned int off;
- unsigned int group_len;
- unsigned int end;
-};
-
-static int bitmap_set_region(const struct region *r,
- unsigned long *bitmap, int nbits)
-{
- unsigned int start;
-
- if (r->end >= nbits)
- return -ERANGE;
-
- for (start = r->start; start <= r->end; start += r->group_len)
- bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
-
- return 0;
-}
-
-static int bitmap_check_region(const struct region *r)
-{
- if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
- return -EINVAL;
-
- return 0;
-}
-
-static const char *bitmap_getnum(const char *str, unsigned int *num)
-{
- unsigned long long n;
- unsigned int len;
-
- len = _parse_integer(str, 10, &n);
- if (!len)
- return ERR_PTR(-EINVAL);
- if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
- return ERR_PTR(-EOVERFLOW);
-
- *num = n;
- return str + len;
-}
-
-static inline bool end_of_str(char c)
-{
- return c == '\0' || c == '\n';
-}
-
-static inline bool __end_of_region(char c)
-{
- return isspace(c) || c == ',';
-}
-
-static inline bool end_of_region(char c)
-{
- return __end_of_region(c) || end_of_str(c);
-}
-
-/*
- * The format allows commas and whitespases at the beginning
- * of the region.
- */
-static const char *bitmap_find_region(const char *str)
-{
- while (__end_of_region(*str))
- str++;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-static const char *bitmap_parse_region(const char *str, struct region *r)
-{
- str = bitmap_getnum(str, &r->start);
- if (IS_ERR(str))
- return str;
-
- if (end_of_region(*str))
- goto no_end;
-
- if (*str != '-')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->end);
- if (IS_ERR(str))
- return str;
-
- if (end_of_region(*str))
- goto no_pattern;
-
- if (*str != ':')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->off);
- if (IS_ERR(str))
- return str;
-
- if (*str != '/')
- return ERR_PTR(-EINVAL);
-
- return bitmap_getnum(str + 1, &r->group_len);
-
-no_end:
- r->end = r->start;
-no_pattern:
- r->off = r->end + 1;
- r->group_len = r->end + 1;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-/**
- * bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read user string from this buffer; must be terminated
- * with a \0 or \n.
- * @maskp: write resulting mask here
- * @nmaskbits: number of bits in mask to be written
- *
- * Input format is a comma-separated list of decimal numbers and
- * ranges. Consecutively set bits are shown as two hyphen-separated
- * decimal numbers, the smallest and largest bit numbers set in
- * the range.
- * Optionally each range can be postfixed to denote that only parts of it
- * should be set. The range will divided to groups of specific size.
- * From each group will be used only defined amount of bits.
- * Syntax: range:used_size/group_size
- * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
- *
- * Returns: 0 on success, -errno on invalid input strings. Error values:
- *
- * - ``-EINVAL``: wrong region format
- * - ``-EINVAL``: invalid character in string
- * - ``-ERANGE``: bit number specified too large for mask
- * - ``-EOVERFLOW``: integer overflow in the input parameters
- */
-int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
-{
- struct region r;
- long ret;
-
- bitmap_zero(maskp, nmaskbits);
-
- while (buf) {
- buf = bitmap_find_region(buf);
- if (buf == NULL)
- return 0;
-
- buf = bitmap_parse_region(buf, &r);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_check_region(&r);
- if (ret)
- return ret;
-
- ret = bitmap_set_region(&r, maskp, nmaskbits);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parselist);
-
-
-/**
- * bitmap_parselist_user()
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for bitmap_parselist(), providing it with user buffer.
- */
-int bitmap_parselist_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- char *buf;
- int ret;
-
- buf = memdup_user_nul(ubuf, ulen);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_parselist(buf, maskp, nmaskbits);
-
- kfree(buf);
- return ret;
-}
-EXPORT_SYMBOL(bitmap_parselist_user);
-
-
-#ifdef CONFIG_NUMA
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
@@ -703,37 +452,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigne
if (pos >= nbits || !test_bit(pos, buf))
return -1;
- return __bitmap_weight(buf, pos);
-}
-
-/**
- * bitmap_ord_to_pos - find position of n-th set bit in bitmap
- * @buf: pointer to bitmap
- * @ord: ordinal bit position (n-th set bit, n >= 0)
- * @nbits: number of valid bit positions in @buf
- *
- * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * Value of @ord should be in range 0 <= @ord < weight(buf). If @ord
- * >= weight(buf), returns @nbits.
- *
- * If for example, just bits 4 through 7 are set in @buf, then @ord
- * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord values returns @nbits. When @ord value 3
- * gets mapped to (returns) @pos value 7 in this example, that means
- * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
- *
- * The bit positions 0 through @nbits-1 are valid positions in @buf.
- */
-unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits)
-{
- unsigned int pos;
-
- for (pos = find_first_bit(buf, nbits);
- pos < nbits && ord;
- pos = find_next_bit(buf, nbits, pos + 1))
- ord--;
-
- return pos;
+ return bitmap_weight(buf, pos);
}
/**
@@ -756,7 +475,7 @@ unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsig
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
@@ -785,9 +504,10 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
- set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
+ set_bit(find_nth_bit(new, nbits, n % w), dst);
}
}
+EXPORT_SYMBOL(bitmap_remap);
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
@@ -804,7 +524,7 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
@@ -823,9 +543,11 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
if (n < 0 || w == 0)
return oldbit;
else
- return bitmap_ord_to_pos(new, n % w, bits);
+ return find_nth_bit(new, bits, n % w);
}
+EXPORT_SYMBOL(bitmap_bitremap);
+#ifdef CONFIG_NUMA
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
@@ -945,7 +667,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
- * n = bitmap_ord_to_pos(orig, m, bits);
+ * n = find_nth_bit(orig, bits, m);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
@@ -985,187 +707,69 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
}
#endif /* CONFIG_NUMA */
-/*
- * Common code for bitmap_*_region() routines.
- * bitmap: array of unsigned longs corresponding to the bitmap
- * pos: the beginning of the region
- * order: region size (log base 2 of number of bits)
- * reg_op: operation(s) to perform on that region of bitmap
- *
- * Can set, verify and/or release a region of bits in a bitmap,
- * depending on which combination of REG_OP_* flag bits is set.
- *
- * A region of a bitmap is a sequence of bits in the bitmap, of
- * some size '1 << order' (a power of two), aligned to that same
- * '1 << order' power of two.
- *
- * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
- * Returns 0 in all other cases and reg_ops.
- */
-
-enum {
- REG_OP_ISFREE, /* true if region is all zero bits */
- REG_OP_ALLOC, /* set all bits in region */
- REG_OP_RELEASE, /* clear all bits in region */
-};
-
-static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
- int nbits_reg; /* number of bits in region */
- int index; /* index first long of region in bitmap */
- int offset; /* bit offset region in bitmap[index] */
- int nlongs_reg; /* num longs spanned by region in bitmap */
- int nbitsinlong; /* num bits of region in each spanned long */
- unsigned long mask; /* bitmask for one long of region */
- int i; /* scans bitmap by longs */
- int ret = 0; /* return value */
-
- /*
- * Either nlongs_reg == 1 (for small orders that fit in one long)
- * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
- */
- nbits_reg = 1 << order;
- index = pos / BITS_PER_LONG;
- offset = pos - (index * BITS_PER_LONG);
- nlongs_reg = BITS_TO_LONGS(nbits_reg);
- nbitsinlong = min(nbits_reg, BITS_PER_LONG);
-
- /*
- * Can't do "mask = (1UL << nbitsinlong) - 1", as that
- * overflows if nbitsinlong == BITS_PER_LONG.
- */
- mask = (1UL << (nbitsinlong - 1));
- mask += mask - 1;
- mask <<= offset;
-
- switch (reg_op) {
- case REG_OP_ISFREE:
- for (i = 0; i < nlongs_reg; i++) {
- if (bitmap[index + i] & mask)
- goto done;
- }
- ret = 1; /* all bits in region free (zero) */
- break;
-
- case REG_OP_ALLOC:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] |= mask;
- break;
-
- case REG_OP_RELEASE:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] &= ~mask;
- break;
- }
-done:
- return ret;
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
}
+EXPORT_SYMBOL(bitmap_alloc);
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @bits: number of bits in the bitmap
- * @order: region size (log base 2 of number of bits) to find
- *
- * Find a region of free (zero) bits in a @bitmap of @bits bits and
- * allocate them (set them to one). Only consider regions of length
- * a power (@order) of two, aligned to that power of two, which
- * makes the search algorithm much faster.
- *
- * Return the bit offset in bitmap of the allocated region,
- * or -errno on failure.
- */
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
- unsigned int pos, end; /* scans bitmap by regions of size order */
-
- for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- continue;
- __reg_op(bitmap, pos, order, REG_OP_ALLOC);
- return pos;
- }
- return -ENOMEM;
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
-EXPORT_SYMBOL(bitmap_find_free_region);
+EXPORT_SYMBOL(bitmap_zalloc);
-/**
- * bitmap_release_region - release allocated bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to release
- * @order: region size (log base 2 of number of bits) to release
- *
- * This is the complement to __bitmap_find_free_region() and releases
- * the found region (by clearing it in the bitmap).
- *
- * No return value.
- */
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
{
- __reg_op(bitmap, pos, order, REG_OP_RELEASE);
+ return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags, node);
}
-EXPORT_SYMBOL(bitmap_release_region);
+EXPORT_SYMBOL(bitmap_alloc_node);
-/**
- * bitmap_allocate_region - allocate bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to allocate
- * @order: region size (log base 2 of number of bits) to allocate
- *
- * Allocate (set bits in) a specified region of a bitmap.
- *
- * Return 0 on success, or %-EBUSY if specified region wasn't
- * free (not all bits were zero).
- */
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
{
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- return -EBUSY;
- return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
}
-EXPORT_SYMBOL(bitmap_allocate_region);
+EXPORT_SYMBOL(bitmap_zalloc_node);
-/**
- * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
- * @dst: destination buffer
- * @src: bitmap to copy
- * @nbits: number of bits in the bitmap
- *
- * Require nbits % BITS_PER_LONG == 0.
- */
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+void bitmap_free(const unsigned long *bitmap)
{
- unsigned int i;
-
- for (i = 0; i < nbits/BITS_PER_LONG; i++) {
- if (BITS_PER_LONG == 64)
- dst[i] = cpu_to_le64(src[i]);
- else
- dst[i] = cpu_to_le32(src[i]);
- }
+ kfree(bitmap);
}
-EXPORT_SYMBOL(bitmap_copy_le);
-#endif
+EXPORT_SYMBOL(bitmap_free);
-unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+static void devm_bitmap_free(void *data)
{
- return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
- flags);
+ unsigned long *bitmap = data;
+
+ bitmap_free(bitmap);
}
-EXPORT_SYMBOL(bitmap_alloc);
-unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
{
- return bitmap_alloc(nbits, flags | __GFP_ZERO);
+ unsigned long *bitmap;
+ int ret;
+
+ bitmap = bitmap_alloc(nbits, flags);
+ if (!bitmap)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
+ if (ret)
+ return NULL;
+
+ return bitmap;
}
-EXPORT_SYMBOL(bitmap_zalloc);
+EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
-void bitmap_free(const unsigned long *bitmap)
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
{
- kfree(bitmap);
+ return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
}
-EXPORT_SYMBOL(bitmap_free);
+EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
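A hedged sketch of the intended use; foo_probe() and the 128-bit size are hypothetical. The bitmap is freed automatically when the device is unbound.

#include <linux/device.h>
#include <linux/gfp.h>

static int foo_probe(struct device *dev)
{
	unsigned long *irq_map;

	irq_map = devm_bitmap_zalloc(dev, 128, GFP_KERNEL);
	if (!irq_map)
		return -ENOMEM;

	/* irq_map stays valid for the lifetime of the bound device. */
	return 0;
}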
#if BITS_PER_LONG == 64
/**
@@ -1213,5 +817,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
+#endif
+#if BITS_PER_LONG == 32
+/**
+ * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u64 (in host byte order), the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
+{
+ int n;
+
+ for (n = nbits; n > 0; n -= 64) {
+ u64 val = *buf++;
+
+ *bitmap++ = val;
+ if (n > 32)
+ *bitmap++ = val >> 32;
+ }
+
+ /*
+ * Clear tail bits in the last word beyond nbits.
+ *
+ * Negative index is OK because here we point to the word next
+ * to the last word of the bitmap, except for nbits == 0, which
+ * is tested implicitly.
+ */
+ if (nbits % BITS_PER_LONG)
+ bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+EXPORT_SYMBOL(bitmap_from_arr64);
+
+/**
+ * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
+ * @buf: array of u64 (in host byte order), the dest bitmap
+ * @bitmap: array of unsigned longs, the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
+{
+ const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
+
+ while (bitmap < end) {
+ *buf = *bitmap++;
+ if (bitmap < end)
+ *buf |= (u64)(*bitmap++) << 32;
+ buf++;
+ }
+
+ /* Clear tail bits in the last element of array beyond nbits. */
+ if (nbits % 64)
+ buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
+}
+EXPORT_SYMBOL(bitmap_to_arr64);
#endif
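A minimal sketch of round-tripping a bitmap through a u64 array, as used when exchanging fixed-width words with userspace; the 100-bit size and the values are illustrative.

static void example_arr64(void)
{
	DECLARE_BITMAP(map, 100);
	u64 words[2] = { 0xffffffffULL, 0x1ULL };

	bitmap_from_arr64(map, words, 100);	/* sets bits 0-31 and bit 64 */
	bitmap_to_arr64(words, map, 100);	/* converts back, clearing tail bits */
}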
diff --git a/lib/bootconfig-data.S b/lib/bootconfig-data.S
new file mode 100644
index 000000000000..ef85ba1a82f4
--- /dev/null
+++ b/lib/bootconfig-data.S
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Embed default bootconfig in the kernel.
+ */
+ .section .init.rodata, "aw"
+ .global embedded_bootconfig_data
+embedded_bootconfig_data:
+ .incbin "lib/default.bconf"
+ .global embedded_bootconfig_data_end
+embedded_bootconfig_data_end:
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
new file mode 100644
index 000000000000..c59d26068a64
--- /dev/null
+++ b/lib/bootconfig.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extra Boot Config
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#ifdef __KERNEL__
+#include <linux/bootconfig.h>
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/string.h>
+
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+/* embedded_bootconfig_data is defined in bootconfig-data.S */
+extern __visible const char embedded_bootconfig_data[];
+extern __visible const char embedded_bootconfig_data_end[];
+
+const char * __init xbc_get_embedded_bootconfig(size_t *size)
+{
+ *size = embedded_bootconfig_data_end - embedded_bootconfig_data;
+ return (*size) ? embedded_bootconfig_data : NULL;
+}
+#endif
+
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean lib/bootconfig.c is available in user space.
+ * However, if you change this file, please make sure that tools/bootconfig
+ * still builds and runs without issues.
+ */
+#include <linux/bootconfig.h>
+#endif
+
+/*
+ * Extra Boot Config (XBC) is given as tree-structured ASCII text of
+ * key-value pairs in memory.
+ * xbc_parse() parses the text to build a simple tree. Each tree node is
+ * simply a key word or a value. A key node may have a next key node and/or
+ * a child node (both key and value). A value node may have a next value
+ * node (for array).
+ */
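An illustrative example, not part of this patch, of the text format the parser below accepts and the tree it produces.

/*
 * For instance, the (hypothetical) input
 *
 *	feature.option.foo = 1
 *	feature.option { bar = 2; baz = "a, b" }
 *
 * builds the key chain feature -> option with three child keys, foo, bar
 * and baz, each carrying a single value node; the quotes keep the comma
 * from starting an array.
 */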
+
+static struct xbc_node *xbc_nodes __initdata;
+static int xbc_node_num __initdata;
+static char *xbc_data __initdata;
+static size_t xbc_data_size __initdata;
+static struct xbc_node *last_parent __initdata;
+static const char *xbc_err_msg __initdata;
+static int xbc_err_pos __initdata;
+static int open_brace[XBC_DEPTH_MAX] __initdata;
+static int brace_index __initdata;
+
+#ifdef __KERNEL__
+static inline void * __init xbc_alloc_mem(size_t size)
+{
+ return memblock_alloc(size, SMP_CACHE_BYTES);
+}
+
+static inline void __init xbc_free_mem(void *addr, size_t size)
+{
+ memblock_free(addr, size);
+}
+
+#else /* !__KERNEL__ */
+
+static inline void *xbc_alloc_mem(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void xbc_free_mem(void *addr, size_t size)
+{
+ free(addr);
+}
+#endif
+/**
+ * xbc_get_info() - Get the information of loaded boot config
+ * @node_size: A pointer to store the number of nodes.
+ * @data_size: A pointer to store the size of bootconfig data.
+ *
+ * Get the number of used nodes in @node_size if it is not NULL,
+ * and the size of bootconfig data in @data_size if it is not NULL.
+ * Return 0 if the boot config is initialized, or return -ENODEV.
+ */
+int __init xbc_get_info(int *node_size, size_t *data_size)
+{
+ if (!xbc_data)
+ return -ENODEV;
+
+ if (node_size)
+ *node_size = xbc_node_num;
+ if (data_size)
+ *data_size = xbc_data_size;
+ return 0;
+}
+
+static int __init xbc_parse_error(const char *msg, const char *p)
+{
+ xbc_err_msg = msg;
+ xbc_err_pos = (int)(p - xbc_data);
+
+ return -EINVAL;
+}
+
+/**
+ * xbc_root_node() - Get the root node of extended boot config
+ *
+ * Return the address of the root node of the extended boot config. If the
+ * extended boot config is not initialized, return NULL.
+ */
+struct xbc_node * __init xbc_root_node(void)
+{
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ return xbc_nodes;
+}
+
+/**
+ * xbc_node_index() - Get the index of XBC node
+ * @node: A target node of getting index.
+ *
+ * Return the index number of @node in XBC node list.
+ */
+int __init xbc_node_index(struct xbc_node *node)
+{
+ return node - &xbc_nodes[0];
+}
+
+/**
+ * xbc_node_get_parent() - Get the parent XBC node
+ * @node: An XBC node.
+ *
+ * Return the parent node of @node. If the node is the top node of the tree,
+ * return NULL.
+ */
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
+{
+ return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
+}
+
+/**
+ * xbc_node_get_child() - Get the child XBC node
+ * @node: An XBC node.
+ *
+ * Return the first child node of @node. If the node has no child, return
+ * NULL.
+ */
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
+{
+ return node->child ? &xbc_nodes[node->child] : NULL;
+}
+
+/**
+ * xbc_node_get_next() - Get the next sibling XBC node
+ * @node: An XBC node.
+ *
+ * Return the NEXT sibling node of @node. If the node has no next sibling,
+ * return NULL. Note that even if this returns NULL, it doesn't mean @node
+ * has no siblings. (You also have to check whether the parent's child node
+ * is @node or not.)
+ */
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
+{
+ return node->next ? &xbc_nodes[node->next] : NULL;
+}
+
+/**
+ * xbc_node_get_data() - Get the data of XBC node
+ * @node: An XBC node.
+ *
+ * Return the data (which is always a null terminated string) of @node.
+ * If the node has invalid data, warn and return NULL.
+ */
+const char * __init xbc_node_get_data(struct xbc_node *node)
+{
+ int offset = node->data & ~XBC_VALUE;
+
+ if (WARN_ON(offset >= xbc_data_size))
+ return NULL;
+
+ return xbc_data + offset;
+}
+
+static bool __init
+xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
+{
+ const char *p = xbc_node_get_data(node);
+ int len = strlen(p);
+
+ if (strncmp(*prefix, p, len))
+ return false;
+
+ p = *prefix + len;
+ if (*p == '.')
+ p++;
+ else if (*p != '\0')
+ return false;
+ *prefix = p;
+
+ return true;
+}
+
+/**
+ * xbc_node_find_subkey() - Find a subkey node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ *
+ * Search a key node under @parent which matches @key. The @key can contain
+ * several words joined with '.'. If @parent is NULL, this searches the
+ * whole tree. Return NULL if no node is matched.
+ */
+struct xbc_node * __init
+xbc_node_find_subkey(struct xbc_node *parent, const char *key)
+{
+ struct xbc_node *node;
+
+ if (parent)
+ node = xbc_node_get_subkey(parent);
+ else
+ node = xbc_root_node();
+
+ while (node && xbc_node_is_key(node)) {
+ if (!xbc_node_match_prefix(node, &key))
+ node = xbc_node_get_next(node);
+ else if (*key != '\0')
+ node = xbc_node_get_subkey(node);
+ else
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * xbc_node_find_value() - Find a value node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ * @vnode: A container pointer of found XBC node.
+ *
+ * Search a value node under @parent whose (parent) key node matches @key,
+ * store it in *@vnode, and return the value string.
+ * The @key can contain several words joined with '.'. If @parent is NULL,
+ * this searches the whole tree. Return the value string if a matching key
+ * is found, or NULL if no node is matched.
+ * Note that this returns a 0-length string and stores NULL in *@vnode if
+ * the key has no value. It also returns the value of the first entry if
+ * the value is an array.
+ */
+const char * __init
+xbc_node_find_value(struct xbc_node *parent, const char *key,
+ struct xbc_node **vnode)
+{
+ struct xbc_node *node = xbc_node_find_subkey(parent, key);
+
+ if (!node || !xbc_node_is_key(node))
+ return NULL;
+
+ node = xbc_node_get_child(node);
+ if (node && !xbc_node_is_value(node))
+ return NULL;
+
+ if (vnode)
+ *vnode = node;
+
+ return node ? xbc_node_get_data(node) : "";
+}
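A usage sketch under the assumption that an "ftrace.event" key may exist in the loaded bootconfig; the key name and log string are purely illustrative.

static void __init example_find_value(void)
{
	struct xbc_node *vnode;
	const char *val;

	val = xbc_node_find_value(NULL, "ftrace.event", &vnode);
	if (!val)
		return;			/* no such key */
	if (!*val)
		return;			/* key exists but carries no value */
	pr_info("ftrace.event = %s\n", val);
}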
+
+/**
+ * xbc_node_compose_key_after() - Compose partial key string of the XBC node
+ * @root: Root XBC node
+ * @node: Target XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the partial key of @node into @buf, starting right after @root
+ * (@root itself is not included). If @root is NULL, this returns the full
+ * key words of @node.
+ * Returns the total length of the key stored in @buf. Returns -EINVAL
+ * if @node is NULL, or if @root is neither an ancestor of @node nor @node
+ * itself, or returns -ERANGE if the key depth exceeds the maximum depth.
+ * This is expected to be used with xbc_find_node() to list all (child)
+ * keys under a given key.
+ */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node,
+ char *buf, size_t size)
+{
+ uint16_t keys[XBC_DEPTH_MAX];
+ int depth = 0, ret = 0, total = 0;
+
+ if (!node || node == root)
+ return -EINVAL;
+
+ if (xbc_node_is_value(node))
+ node = xbc_node_get_parent(node);
+
+ while (node && node != root) {
+ keys[depth++] = xbc_node_index(node);
+ if (depth == XBC_DEPTH_MAX)
+ return -ERANGE;
+ node = xbc_node_get_parent(node);
+ }
+ if (!node && root)
+ return -EINVAL;
+
+ while (--depth >= 0) {
+ node = xbc_nodes + keys[depth];
+ ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
+ depth ? "." : "");
+ if (ret < 0)
+ return ret;
+ if (ret > size) {
+ size = 0;
+ } else {
+ size -= ret;
+ buf += ret;
+ }
+ total += ret;
+ }
+
+ return total;
+}
+
+/**
+ * xbc_node_find_next_leaf() - Find the next leaf node under given node
+ * @root: An XBC root node
+ * @node: An XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of @node
+ * under @root node (including @root node itself).
+ * Return the next node or NULL if next leaf node is not found.
+ */
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *node)
+{
+ struct xbc_node *next;
+
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ if (!node) { /* First try */
+ node = root;
+ if (!node)
+ node = xbc_nodes;
+ } else {
+ /* Leaf node may have a subkey */
+ next = xbc_node_get_subkey(node);
+ if (next) {
+ node = next;
+ goto found;
+ }
+
+ if (node == root) /* @root was a leaf, no child node. */
+ return NULL;
+
+ while (!node->next) {
+ node = xbc_node_get_parent(node);
+ if (node == root)
+ return NULL;
+			/* User passed a node which is not under the parent */
+ if (WARN_ON(!node))
+ return NULL;
+ }
+ node = xbc_node_get_next(node);
+ }
+
+found:
+ while (node && !xbc_node_is_leaf(node))
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+/**
+ * xbc_node_find_next_key_value() - Find the next key-value pair nodes
+ * @root: An XBC root node
+ * @leaf: A container pointer of XBC node which starts from.
+ *
+ * Search the next leaf node (which means the terminal key node) of *@leaf
+ * under @root node. Returns the value and updates *@leaf if the next leaf
+ * node is found, or NULL if no next leaf node is found.
+ * Note that this returns 0-length string if the key has no value, or
+ * the value of the first entry if the value is an array.
+ */
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf)
+{
+ /* tip must be passed */
+ if (WARN_ON(!leaf))
+ return NULL;
+
+ *leaf = xbc_node_find_next_leaf(root, *leaf);
+ if (!*leaf)
+ return NULL;
+ if ((*leaf)->child)
+ return xbc_node_get_data(xbc_node_get_child(*leaf));
+ else
+ return ""; /* No value key */
+}
+
+/* XBC parse and tree build */
+
+static int __init xbc_init_node(struct xbc_node *node, char *data, uint32_t flag)
+{
+ unsigned long offset = data - xbc_data;
+
+ if (WARN_ON(offset >= XBC_DATA_MAX))
+ return -EINVAL;
+
+ node->data = (uint16_t)offset | flag;
+ node->child = 0;
+ node->next = 0;
+
+ return 0;
+}
+
+static struct xbc_node * __init xbc_add_node(char *data, uint32_t flag)
+{
+ struct xbc_node *node;
+
+ if (xbc_node_num == XBC_NODE_MAX)
+ return NULL;
+
+ node = &xbc_nodes[xbc_node_num++];
+ if (xbc_init_node(node, data, flag) < 0)
+ return NULL;
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
+{
+ while (node->next)
+ node = xbc_node_get_next(node);
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
+{
+ while (node->child)
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+static struct xbc_node * __init __xbc_add_sibling(char *data, uint32_t flag, bool head)
+{
+ struct xbc_node *sib, *node = xbc_add_node(data, flag);
+
+ if (node) {
+ if (!last_parent) {
+ /* Ignore @head in this case */
+ node->parent = XBC_NODE_MAX;
+ sib = xbc_last_sibling(xbc_nodes);
+ sib->next = xbc_node_index(node);
+ } else {
+ node->parent = xbc_node_index(last_parent);
+ if (!last_parent->child || head) {
+ node->next = last_parent->child;
+ last_parent->child = xbc_node_index(node);
+ } else {
+ sib = xbc_node_get_child(last_parent);
+ sib = xbc_last_sibling(sib);
+ sib->next = xbc_node_index(node);
+ }
+ }
+ } else
+ xbc_parse_error("Too many nodes", data);
+
+ return node;
+}
+
+static inline struct xbc_node * __init xbc_add_sibling(char *data, uint32_t flag)
+{
+ return __xbc_add_sibling(data, flag, false);
+}
+
+static inline struct xbc_node * __init xbc_add_head_sibling(char *data, uint32_t flag)
+{
+ return __xbc_add_sibling(data, flag, true);
+}
+
+static inline __init struct xbc_node *xbc_add_child(char *data, uint32_t flag)
+{
+ struct xbc_node *node = xbc_add_sibling(data, flag);
+
+ if (node)
+ last_parent = node;
+
+ return node;
+}
+
+static inline __init bool xbc_valid_keyword(char *key)
+{
+ if (key[0] == '\0')
+ return false;
+
+ while (isalnum(*key) || *key == '-' || *key == '_')
+ key++;
+
+ return *key == '\0';
+}
+
+static char *skip_comment(char *p)
+{
+ char *ret;
+
+ ret = strchr(p, '\n');
+ if (!ret)
+ ret = p + strlen(p);
+ else
+ ret++;
+
+ return ret;
+}
+
+static char *skip_spaces_until_newline(char *p)
+{
+ while (isspace(*p) && *p != '\n')
+ p++;
+ return p;
+}
+
+static int __init __xbc_open_brace(char *p)
+{
+ /* Push the last key as open brace */
+ open_brace[brace_index++] = xbc_node_index(last_parent);
+ if (brace_index >= XBC_DEPTH_MAX)
+ return xbc_parse_error("Exceed max depth of braces", p);
+
+ return 0;
+}
+
+static int __init __xbc_close_brace(char *p)
+{
+ brace_index--;
+ if (!last_parent || brace_index < 0 ||
+ (open_brace[brace_index] != xbc_node_index(last_parent)))
+ return xbc_parse_error("Unexpected closing brace", p);
+
+ if (brace_index == 0)
+ last_parent = NULL;
+ else
+ last_parent = &xbc_nodes[open_brace[brace_index - 1]];
+
+ return 0;
+}
+
+/*
+ * Return delimiter or error, no node added. As in lib/cmdline.c,
+ * you can use " around spaces, but you can't escape " inside a value.
+ */
+static int __init __xbc_parse_value(char **__v, char **__n)
+{
+ char *p, *v = *__v;
+ int c, quotes = 0;
+
+ v = skip_spaces(v);
+ while (*v == '#') {
+ v = skip_comment(v);
+ v = skip_spaces(v);
+ }
+ if (*v == '"' || *v == '\'') {
+ quotes = *v;
+ v++;
+ }
+ p = v - 1;
+ while ((c = *++p)) {
+ if (!isprint(c) && !isspace(c))
+ return xbc_parse_error("Non printable value", p);
+ if (quotes) {
+ if (c != quotes)
+ continue;
+ quotes = 0;
+ *p++ = '\0';
+ p = skip_spaces_until_newline(p);
+ c = *p;
+ if (c && !strchr(",;\n#}", c))
+ return xbc_parse_error("No value delimiter", p);
+ if (*p)
+ p++;
+ break;
+ }
+ if (strchr(",;\n#}", c)) {
+ *p++ = '\0';
+ v = strim(v);
+ break;
+ }
+ }
+ if (quotes)
+ return xbc_parse_error("No closing quotes", p);
+ if (c == '#') {
+ p = skip_comment(p);
+ c = '\n'; /* A comment must be treated as a newline */
+ }
+ *__n = p;
+ *__v = v;
+
+ return c;
+}
+
+static int __init xbc_parse_array(char **__v)
+{
+ struct xbc_node *node;
+ char *next;
+ int c = 0;
+
+ if (last_parent->child)
+ last_parent = xbc_node_get_child(last_parent);
+
+ do {
+ c = __xbc_parse_value(__v, &next);
+ if (c < 0)
+ return c;
+
+ node = xbc_add_child(*__v, XBC_VALUE);
+ if (!node)
+ return -ENOMEM;
+ *__v = next;
+ } while (c == ',');
+ node->child = 0;
+
+ return c;
+}
+
+static inline __init
+struct xbc_node *find_match_node(struct xbc_node *node, char *k)
+{
+ while (node) {
+ if (!strcmp(xbc_node_get_data(node), k))
+ break;
+ node = xbc_node_get_next(node);
+ }
+ return node;
+}
+
+static int __init __xbc_add_key(char *k)
+{
+ struct xbc_node *node, *child;
+
+ if (!xbc_valid_keyword(k))
+ return xbc_parse_error("Invalid keyword", k);
+
+ if (unlikely(xbc_node_num == 0))
+ goto add_node;
+
+ if (!last_parent) /* the first level */
+ node = find_match_node(xbc_nodes, k);
+ else {
+ child = xbc_node_get_child(last_parent);
+ /* Since the value node is the first child, skip it. */
+ if (child && xbc_node_is_value(child))
+ child = xbc_node_get_next(child);
+ node = find_match_node(child, k);
+ }
+
+ if (node)
+ last_parent = node;
+ else {
+add_node:
+ node = xbc_add_child(k, XBC_KEY);
+ if (!node)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init __xbc_parse_keys(char *k)
+{
+ char *p;
+ int ret;
+
+ k = strim(k);
+ while ((p = strchr(k, '.'))) {
+ *p++ = '\0';
+ ret = __xbc_add_key(k);
+ if (ret)
+ return ret;
+ k = p;
+ }
+
+ return __xbc_add_key(k);
+}
+
+static int __init xbc_parse_kv(char **k, char *v, int op)
+{
+ struct xbc_node *prev_parent = last_parent;
+ struct xbc_node *child;
+ char *next;
+ int c, ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+
+ c = __xbc_parse_value(&v, &next);
+ if (c < 0)
+ return c;
+
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child)) {
+ if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ if (op == ':') {
+ unsigned short nidx = child->next;
+
+ xbc_init_node(child, v, XBC_VALUE);
+ child->next = nidx; /* keep subkeys */
+ goto array;
+ }
+ /* op must be '+' */
+ last_parent = xbc_last_child(child);
+ }
+ /* The value node should always be the first child */
+ if (!xbc_add_head_sibling(v, XBC_VALUE))
+ return -ENOMEM;
+
+array:
+ if (c == ',') { /* Array */
+ c = xbc_parse_array(&next);
+ if (c < 0)
+ return c;
+ }
+
+ last_parent = prev_parent;
+
+ if (c == '}') {
+ ret = __xbc_close_brace(next - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ *k = next;
+
+ return 0;
+}
+
+static int __init xbc_parse_key(char **k, char *n)
+{
+ struct xbc_node *prev_parent = last_parent;
+ int ret;
+
+ *k = strim(*k);
+ if (**k != '\0') {
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ last_parent = prev_parent;
+ }
+ *k = n;
+
+ return 0;
+}
+
+static int __init xbc_open_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ *k = n;
+
+ return __xbc_open_brace(n - 1);
+}
+
+static int __init xbc_close_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = xbc_parse_key(k, n);
+ if (ret)
+ return ret;
+ /* k is updated in xbc_parse_key() */
+
+ return __xbc_close_brace(n - 1);
+}
+
+static int __init xbc_verify_tree(void)
+{
+ int i, depth, len, wlen;
+ struct xbc_node *n, *m;
+
+ /* Brace closing */
+ if (brace_index) {
+ n = &xbc_nodes[open_brace[brace_index]];
+ return xbc_parse_error("Brace is not closed",
+ xbc_node_get_data(n));
+ }
+
+ /* Empty tree */
+ if (xbc_node_num == 0) {
+ xbc_parse_error("Empty config", xbc_data);
+ return -ENOENT;
+ }
+
+ for (i = 0; i < xbc_node_num; i++) {
+ if (xbc_nodes[i].next > xbc_node_num) {
+ return xbc_parse_error("No closing brace",
+ xbc_node_get_data(xbc_nodes + i));
+ }
+ }
+
+ /* Key tree limitation check */
+ n = &xbc_nodes[0];
+ depth = 1;
+ len = 0;
+
+ while (n) {
+ wlen = strlen(xbc_node_get_data(n)) + 1;
+ len += wlen;
+ if (len > XBC_KEYLEN_MAX)
+ return xbc_parse_error("Too long key length",
+ xbc_node_get_data(n));
+
+ m = xbc_node_get_child(n);
+ if (m && xbc_node_is_key(m)) {
+ n = m;
+ depth++;
+ if (depth > XBC_DEPTH_MAX)
+ return xbc_parse_error("Too many key words",
+ xbc_node_get_data(n));
+ continue;
+ }
+ len -= wlen;
+ m = xbc_node_get_next(n);
+ while (!m) {
+ n = xbc_node_get_parent(n);
+ if (!n)
+ break;
+ len -= strlen(xbc_node_get_data(n)) + 1;
+ depth--;
+ m = xbc_node_get_next(n);
+ }
+ n = m;
+ }
+
+ return 0;
+}
+
+/* Need to set up xbc_data and xbc_nodes before calling this. */
+static int __init xbc_parse_tree(void)
+{
+ char *p, *q;
+ int ret = 0, c;
+
+ last_parent = NULL;
+ p = xbc_data;
+ do {
+ q = strpbrk(p, "{}=+;:\n#");
+ if (!q) {
+ p = skip_spaces(p);
+ if (*p != '\0')
+ ret = xbc_parse_error("No delimiter", p);
+ break;
+ }
+
+ c = *q;
+ *q++ = '\0';
+ switch (c) {
+ case ':':
+ case '+':
+ if (*q++ != '=') {
+ ret = xbc_parse_error(c == '+' ?
+ "Wrong '+' operator" :
+ "Wrong ':' operator",
+ q - 2);
+ break;
+ }
+ fallthrough;
+ case '=':
+ ret = xbc_parse_kv(&p, q, c);
+ break;
+ case '{':
+ ret = xbc_open_brace(&p, q);
+ break;
+ case '#':
+ q = skip_comment(q);
+ fallthrough;
+ case ';':
+ case '\n':
+ ret = xbc_parse_key(&p, q);
+ break;
+ case '}':
+ ret = xbc_close_brace(&p, q);
+ break;
+ }
+ } while (!ret);
+
+ return ret;
+}
+
+/**
+ * xbc_exit() - Clean up all parsed bootconfig
+ *
+ * This clears all data structures of the parsed bootconfig from memory.
+ * If you need to reuse xbc_init() with a new boot config, you can
+ * use this.
+ */
+void __init xbc_exit(void)
+{
+ xbc_free_mem(xbc_data, xbc_data_size);
+ xbc_data = NULL;
+ xbc_data_size = 0;
+ xbc_node_num = 0;
+ xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_nodes = NULL;
+ brace_index = 0;
+}
+
+/**
+ * xbc_init() - Parse given XBC file and build XBC internal tree
+ * @data: The boot config text original data
+ * @size: The size of @data
+ * @emsg: A pointer of const char * to store the error message
+ * @epos: A pointer of int to store the error position
+ *
+ * This parses the boot config text in @data. @size must be smaller
+ * than XBC_DATA_MAX.
+ * Return the number of stored nodes (>0) on success, or -errno
+ * if there is any error.
+ * In error cases, @emsg will be updated with an error message and
+ * @epos will be updated with the error position, which is the byte offset
+ * of @data. If the error is not a parser error, @epos will be -1.
+ */
+int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+{
+ int ret;
+
+ if (epos)
+ *epos = -1;
+
+ if (xbc_data) {
+ if (emsg)
+ *emsg = "Bootconfig is already initialized";
+ return -EBUSY;
+ }
+ if (size > XBC_DATA_MAX || size == 0) {
+ if (emsg)
+ *emsg = size ? "Config data is too big" :
+ "Config data is empty";
+ return -ERANGE;
+ }
+
+ xbc_data = xbc_alloc_mem(size + 1);
+ if (!xbc_data) {
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig data";
+ return -ENOMEM;
+ }
+ memcpy(xbc_data, data, size);
+ xbc_data[size] = '\0';
+ xbc_data_size = size + 1;
+
+ xbc_nodes = xbc_alloc_mem(sizeof(struct xbc_node) * XBC_NODE_MAX);
+ if (!xbc_nodes) {
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
+ xbc_exit();
+ return -ENOMEM;
+ }
+ memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
+
+ ret = xbc_parse_tree();
+ if (!ret)
+ ret = xbc_verify_tree();
+
+ if (ret < 0) {
+ if (epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
+ xbc_exit();
+ } else
+ ret = xbc_node_num;
+
+ return ret;
+}
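A hedged sketch of a caller feeding a bootconfig blob to xbc_init() and reporting parse errors; the function name and the log strings are hypothetical.

static int __init example_load_bootconfig(const char *data, size_t size)
{
	const char *msg;
	int ret, pos;

	ret = xbc_init(data, size, &msg, &pos);
	if (ret < 0) {
		pr_err("bootconfig parse error: %s at offset %d\n", msg, pos);
		return ret;
	}
	pr_info("bootconfig: %d nodes loaded\n", ret);
	return 0;
}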
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 8baa83968162..bf86aa66f2b2 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -28,27 +28,9 @@
* the key and elements in the array are of the same type, you can use
* the same comparison function for both sort() and bsearch().
*/
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt))
+void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
{
- const char *pivot;
- int result;
-
- while (num > 0) {
- pivot = base + (num >> 1) * size;
- result = cmp(key, pivot);
-
- if (result == 0)
- return (void *)pivot;
-
- if (result > 0) {
- base = pivot + size;
- num--;
- }
- num >>= 1;
- }
-
- return NULL;
+ return __inline_bsearch(key, base, num, size, cmp);
}
EXPORT_SYMBOL(bsearch);
NOKPROBE_SYMBOL(bsearch);
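A minimal sketch of a lookup through the cmp_func_t-typed bsearch(); the sorted array and the comparator are illustrative.

#include <linux/bsearch.h>
#include <linux/kernel.h>
#include <linux/types.h>

static int cmp_int(const void *key, const void *elt)
{
	int a = *(const int *)key, b = *(const int *)elt;

	return (a > b) - (a < b);
}

static bool example_bsearch(int needle)
{
	static const int sorted[] = { 1, 3, 5, 7, 9 };

	return bsearch(&needle, sorted, ARRAY_SIZE(sorted),
		       sizeof(sorted[0]), cmp_int) != NULL;
}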
diff --git a/lib/btree.c b/lib/btree.c
index b4cf08a5c267..49420cae3a83 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -238,7 +238,7 @@ static int keyzero(struct btree_geo *geo, unsigned long *key)
return 1;
}
-void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
unsigned long *key)
{
int i, height = head->height;
@@ -257,7 +257,16 @@ void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
if (!node)
return NULL;
}
+ return node;
+}
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i;
+ unsigned long *node;
+
+ node = btree_lookup_node(head, geo, key);
if (!node)
return NULL;
@@ -271,23 +280,10 @@ EXPORT_SYMBOL_GPL(btree_lookup);
int btree_update(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, void *val)
{
- int i, height = head->height;
- unsigned long *node = head->node;
-
- if (height == 0)
- return -ENOENT;
-
- for ( ; height > 1; height--) {
- for (i = 0; i < geo->no_pairs; i++)
- if (keycmp(geo, node, i, key) <= 0)
- break;
- if (i == geo->no_pairs)
- return -ENOENT;
- node = bval(geo, node, i);
- if (!node)
- return -ENOENT;
- }
+ int i;
+ unsigned long *node;
+ node = btree_lookup_node(head, geo, key);
if (!node)
return -ENOENT;
@@ -798,4 +794,3 @@ module_exit(btree_module_exit);
MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
-MODULE_LICENSE("GPL");
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..e0ff21989990 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -6,8 +6,7 @@
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
- CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
- the containing struct bug_entry for bug_addr and file.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -47,15 +46,17 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
+#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- return bug->bug_addr;
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
#else
- return (unsigned long)bug + bug->bug_addr_disp;
+ return bug->bug_addr;
#endif
}
@@ -90,8 +91,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
char *secstrings;
unsigned int i;
- lockdep_assert_held(&module_mutex);
-
mod->bug_table = NULL;
mod->num_bugs = 0;
@@ -117,7 +116,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
void module_bug_cleanup(struct module *mod)
{
- lockdep_assert_held(&module_mutex);
list_del_rcu(&mod->bug_list);
}
@@ -129,6 +127,22 @@ static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
}
#endif
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = (const char *)&bug->file_disp + bug->file_disp;
+#else
+ *file = bug->file;
+#endif
+ *line = bug->line;
+#else
+ *file = NULL;
+ *line = 0;
+#endif
+}
+
struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
@@ -140,7 +154,7 @@ struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
-enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
struct bug_entry *bug;
const char *file;
@@ -153,34 +167,33 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (!bug)
return BUG_TRAP_TYPE_NONE;
- file = NULL;
- line = 0;
- warning = 0;
+ disable_trace_on_warning();
- if (bug) {
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- file = bug->file;
-#else
- file = (const char *)bug + bug->file_disp;
-#endif
- line = bug->line;
-#endif
- warning = (bug->flags & BUGFLAG_WARNING) != 0;
- once = (bug->flags & BUGFLAG_ONCE) != 0;
- done = (bug->flags & BUGFLAG_DONE) != 0;
-
- if (warning && once) {
- if (done)
- return BUG_TRAP_TYPE_WARN;
-
- /*
- * Since this is the only store, concurrency is not an issue.
- */
- bug->flags |= BUGFLAG_DONE;
- }
+ bug_get_file_line(bug, &file, &line);
+
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
}
+ /*
+ * BUG() and WARN_ON() families don't print a custom debug message
+ * before triggering the exception handler, so we must add the
+ * "cut here" line now. WARN() issues its own "cut here" before the
+ * extra debugging message it writes before triggering the handler.
+ */
+ if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
+ printk(KERN_DEFAULT CUT_HERE);
+
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
@@ -188,8 +201,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_WARN;
}
- printk(KERN_DEFAULT CUT_HERE);
-
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
@@ -199,6 +210,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_BUG;
}
+enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+{
+ enum bug_trap_type ret;
+ bool rcu = false;
+
+ rcu = warn_rcu_enter();
+ ret = __report_bug(bugaddr, regs);
+ warn_rcu_exit(rcu);
+
+ return ret;
+}
+
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
struct bug_entry *bug;
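
The new bug_get_file_line() helper hides the CONFIG_DEBUG_BUGVERBOSE and CONFIG_GENERIC_BUG_RELATIVE_POINTERS differences from callers. A hedged sketch of how an architecture trap handler might combine it with find_bug() (the surrounding handler code is hypothetical):

	struct bug_entry *bug = find_bug(bugaddr);
	const char *file;
	unsigned int line;

	if (bug) {
		/* file comes back NULL when DEBUG_BUGVERBOSE is off */
		bug_get_file_line(bug, &file, &line);
		if (file)
			pr_crit("trap at %s:%u\n", file, line);
	}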
diff --git a/lib/buildid.c b/lib/buildid.c
new file mode 100644
index 000000000000..e3a7acdeef0e
--- /dev/null
+++ b/lib/buildid.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/buildid.h>
+#include <linux/cache.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+
+#define BUILD_ID 3
+
+/*
+ * Parse build id from the note segment. This logic can be shared between
+ * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
+ * identical.
+ */
+static int parse_build_id_buf(unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ Elf32_Word note_offs = 0, new_offs;
+
+ while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+
+ if (nhdr->n_type == BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU") &&
+ !strcmp((char *)(nhdr + 1), "GNU") &&
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+ memcpy(build_id,
+ note_start + note_offs +
+ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ if (size)
+ *size = nhdr->n_descsz;
+ return 0;
+ }
+ new_offs = note_offs + sizeof(Elf32_Nhdr) +
+ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+ if (new_offs <= note_offs) /* overflow */
+ break;
+ note_offs = new_offs;
+ }
+
+ return -EINVAL;
+}
+
+static inline int parse_build_id(const void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ return parse_build_id_buf(build_id, size, note_start, note_size);
+}
+
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 64-bit ELF */
+static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr;
+ struct page *page;
+ void *page_addr;
+ int ret;
+
+ /* only works for page backed storage */
+ if (!vma->vm_file)
+ return -EINVAL;
+
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
+
+ ret = -EINVAL;
+ page_addr = kmap_atomic(page);
+ ehdr = (Elf32_Ehdr *)page_addr;
+
+ /* compare magic x7f "ELF" */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
+ goto out;
+
+ /* only support executable file and shared object file */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
+ goto out;
+
+ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
+ ret = get_build_id_32(page_addr, build_id, size);
+ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ ret = get_build_id_64(page_addr, build_id, size);
+out:
+ kunmap_atomic(page_addr);
+ put_page(page);
+ return ret;
+}
+
+/**
+ * build_id_parse_buf - Get build ID from a buffer
+ * @buf: ELF note section(s) to parse
+ * @buf_size: Size of @buf in bytes
+ * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
+{
+ return parse_build_id_buf(build_id, NULL, buf, buf_size);
+}
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
+
+/**
+ * init_vmlinux_build_id - Compute and stash the running kernel's build ID
+ */
+void __init init_vmlinux_build_id(void)
+{
+ extern const void __start_notes __weak;
+ extern const void __stop_notes __weak;
+ unsigned int size = &__stop_notes - &__start_notes;
+
+ build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
+}
+#endif
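
build_id_parse_buf() only needs a blob of ELF note data, which is exactly how init_vmlinux_build_id() uses it above. A small sketch of a hypothetical consumer dumping the parsed ID (notes and notes_len are assumed to come from the caller):

	unsigned char id[BUILD_ID_SIZE_MAX];

	/* returns 0 on success; id is zero-padded out to BUILD_ID_SIZE_MAX */
	if (!build_id_parse_buf(notes, id, notes_len))
		print_hex_dump(KERN_INFO, "build id: ", DUMP_PREFIX_NONE,
			       16, 1, id, BUILD_ID_SIZE_MAX, false);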
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 8be59f84eaea..bfd53972a4d8 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -22,9 +22,6 @@ void bust_spinlocks(int yes)
if (yes) {
++oops_in_progress;
} else {
-#ifdef CONFIG_VT
- unblank_screen();
-#endif
console_unblank();
if (--oops_in_progress == 0)
wake_up_klogd();
diff --git a/lib/checksum.c b/lib/checksum.c
index de032ad96f4a..6860d6b05a17 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -145,37 +145,6 @@ __sum16 ip_compute_csum(const void *buff, int len)
}
EXPORT_SYMBOL(ip_compute_csum);
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *csum_err)
-{
- int missing;
-
- missing = __copy_from_user(dst, src, len);
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *csum_err = -EFAULT;
- } else
- *csum_err = 0;
-
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy);
-
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
new file mode 100644
index 000000000000..bf70850035c7
--- /dev/null
+++ b/lib/checksum_kunit.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for csum_partial, csum_fold, ip_fast_csum and csum_ipv6_magic
+ */
+
+#include <kunit/test.h>
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#define MAX_LEN 512
+#define MAX_ALIGN 64
+#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+
+#define IPv4_MIN_WORDS 5
+#define IPv4_MAX_WORDS 15
+#define NUM_IPv6_TESTS 200
+#define NUM_IP_FAST_CSUM_TESTS 181
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 random_init_sum = 0x2847aab;
+static const u8 random_buf[] = {
+ 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86,
+ 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5,
+ 0xd3, 0x4f, 0xf5, 0x60, 0x50, 0x5c, 0xc7, 0xfa, 0x6d, 0x1a, 0xc7, 0xf0,
+ 0xd2, 0x2c, 0x12, 0x3d, 0x88, 0xe3, 0x14, 0x21, 0xb1, 0x5e, 0x45, 0x31,
+ 0xa2, 0x85, 0x36, 0x76, 0xba, 0xd8, 0xad, 0xbb, 0x9e, 0x49, 0x8f, 0xf7,
+ 0xce, 0xea, 0xef, 0xca, 0x2c, 0x29, 0xf7, 0x15, 0x5c, 0x1d, 0x4d, 0x09,
+ 0x1f, 0xe2, 0x14, 0x31, 0x8c, 0x07, 0x57, 0x23, 0x1f, 0x6f, 0x03, 0xe1,
+ 0x93, 0x19, 0x53, 0x03, 0x45, 0x49, 0x9a, 0x3b, 0x8e, 0x0c, 0x12, 0x5d,
+ 0x8a, 0xb8, 0x9b, 0x8c, 0x9a, 0x03, 0xe5, 0xa2, 0x43, 0xd2, 0x3b, 0x4e,
+ 0x7e, 0x30, 0x3c, 0x22, 0x2d, 0xc5, 0xfc, 0x9e, 0xdb, 0xc6, 0xf9, 0x69,
+ 0x12, 0x39, 0x1f, 0xa0, 0x11, 0x0c, 0x3f, 0xf5, 0x53, 0xc9, 0x30, 0xfb,
+ 0xb0, 0xdd, 0x21, 0x1d, 0x34, 0xe2, 0x65, 0x30, 0xf1, 0xe8, 0x1b, 0xe7,
+ 0x55, 0x0d, 0xeb, 0xbd, 0xcc, 0x9d, 0x24, 0xa4, 0xad, 0xa7, 0x93, 0x47,
+ 0x19, 0x2e, 0xc4, 0x5c, 0x3b, 0xc7, 0x6d, 0x95, 0x0c, 0x47, 0x60, 0xaf,
+ 0x5b, 0x47, 0xee, 0xdc, 0x31, 0x31, 0x14, 0x12, 0x7e, 0x9e, 0x45, 0xb1,
+ 0xc1, 0x69, 0x4b, 0x84, 0xfc, 0x88, 0xc1, 0x9e, 0x46, 0xb4, 0xc2, 0x25,
+ 0xc5, 0x6c, 0x4c, 0x22, 0x58, 0x5c, 0xbe, 0xff, 0xea, 0x88, 0x88, 0x7a,
+ 0xcb, 0x1c, 0x5d, 0x63, 0xa1, 0xf2, 0x33, 0x0c, 0xa2, 0x16, 0x0b, 0x6e,
+ 0x2b, 0x79, 0x58, 0xf7, 0xac, 0xd3, 0x6a, 0x3f, 0x81, 0x57, 0x48, 0x45,
+ 0xe3, 0x7c, 0xdc, 0xd6, 0x34, 0x7e, 0xe6, 0x73, 0xfa, 0xcb, 0x31, 0x18,
+ 0xa9, 0x0b, 0xee, 0x6b, 0x99, 0xb9, 0x2d, 0xde, 0x22, 0x0e, 0x71, 0x57,
+ 0x0e, 0x9b, 0x11, 0xd1, 0x15, 0x41, 0xd0, 0x6b, 0x50, 0x8a, 0x23, 0x64,
+ 0xe3, 0x9c, 0xb3, 0x55, 0x09, 0xe9, 0x32, 0x67, 0xf9, 0xe0, 0x73, 0xf1,
+ 0x60, 0x66, 0x0b, 0x88, 0x79, 0x8d, 0x4b, 0x52, 0x83, 0x20, 0x26, 0x78,
+ 0x49, 0x27, 0xe7, 0x3e, 0x29, 0xa8, 0x18, 0x82, 0x41, 0xdd, 0x1e, 0xcc,
+ 0x3b, 0xc4, 0x65, 0xd1, 0x21, 0x40, 0x72, 0xb2, 0x87, 0x5e, 0x16, 0x10,
+ 0x80, 0x3f, 0x4b, 0x58, 0x1c, 0xc2, 0x79, 0x20, 0xf0, 0xe0, 0x80, 0xd3,
+ 0x52, 0xa5, 0x19, 0x6e, 0x47, 0x90, 0x08, 0xf5, 0x50, 0xe2, 0xd6, 0xae,
+ 0xe9, 0x2e, 0xdc, 0xd5, 0xb4, 0x90, 0x1f, 0x79, 0x49, 0x82, 0x21, 0x84,
+ 0xa0, 0xb5, 0x2f, 0xff, 0x30, 0x71, 0xed, 0x80, 0x68, 0xb1, 0x6d, 0xef,
+ 0xf6, 0xcf, 0xb8, 0x41, 0x79, 0xf5, 0x01, 0xbc, 0x0c, 0x9b, 0x0e, 0x06,
+ 0xf3, 0xb0, 0xbb, 0x97, 0xb8, 0xb1, 0xfd, 0x51, 0x4e, 0xef, 0x0a, 0x3d,
+ 0x7a, 0x3d, 0xbd, 0x61, 0x00, 0xa2, 0xb3, 0xf0, 0x1d, 0x77, 0x7b, 0x6c,
+ 0x01, 0x61, 0xa5, 0xa3, 0xdb, 0xd5, 0xd5, 0xf4, 0xb5, 0x28, 0x9f, 0x0a,
+ 0xa3, 0x82, 0x5f, 0x4b, 0x40, 0x0f, 0x05, 0x0e, 0x78, 0xed, 0xbf, 0x17,
+ 0xf6, 0x5a, 0x8a, 0x7d, 0xf9, 0x45, 0xc1, 0xd7, 0x1b, 0x9d, 0x6c, 0x07,
+ 0x88, 0xf3, 0xbc, 0xf1, 0xea, 0x28, 0x1f, 0xb8, 0x7a, 0x60, 0x3c, 0xce,
+ 0x3e, 0x50, 0xb2, 0x0b, 0xcf, 0xe5, 0x08, 0x1f, 0x48, 0x04, 0xf9, 0x35,
+ 0x29, 0x15, 0xbe, 0x82, 0x96, 0xc2, 0x55, 0x04, 0x6c, 0x19, 0x45, 0x29,
+ 0x0b, 0xb6, 0x49, 0x12, 0xfb, 0x8d, 0x1b, 0x75, 0x8b, 0xd9, 0x6a, 0x5c,
+ 0xbe, 0x46, 0x2b, 0x41, 0xfe, 0x21, 0xad, 0x1f, 0x75, 0xe7, 0x90, 0x3d,
+ 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92,
+ 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09
+};
+
+/* Values for a little endian CPU. Byte swap on big endian CPU. */
+static const u16 expected_results[] = {
+ 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab,
+ 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654,
+ 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527,
+ 0xf432, 0x9432, 0x93e2, 0x37e2, 0x371b, 0x3d1a, 0x3cad, 0x22ad, 0x21e6,
+ 0x31e5, 0x3113, 0x0513, 0x0501, 0xc800, 0xc778, 0xe477, 0xe463, 0xc363,
+ 0xc2b2, 0x64b2, 0x646d, 0x336d, 0x32cb, 0xadca, 0xad94, 0x3794, 0x36da,
+ 0x5ed9, 0x5e2c, 0xa32b, 0xa28d, 0x598d, 0x58fe, 0x61fd, 0x612f, 0x772e,
+ 0x763f, 0xac3e, 0xac12, 0x8312, 0x821b, 0x6d1b, 0x6cbf, 0x4fbf, 0x4f72,
+ 0x4672, 0x4653, 0x6452, 0x643e, 0x333e, 0x32b2, 0x2bb2, 0x2b5b, 0x085b,
+ 0x083c, 0x993b, 0x9938, 0xb837, 0xb7a4, 0x9ea4, 0x9e51, 0x9b51, 0x9b0c,
+ 0x520c, 0x5172, 0x1672, 0x15e4, 0x09e4, 0x09d2, 0xacd1, 0xac47, 0xf446,
+ 0xf3ab, 0x67ab, 0x6711, 0x6411, 0x632c, 0xc12b, 0xc0e8, 0xeee7, 0xeeac,
+ 0xa0ac, 0xa02e, 0x702e, 0x6ff2, 0x4df2, 0x4dc5, 0x88c4, 0x87c8, 0xe9c7,
+ 0xe8ec, 0x22ec, 0x21f3, 0xb8f2, 0xb8e0, 0x7fe0, 0x7fc1, 0xdfc0, 0xdfaf,
+ 0xd3af, 0xd370, 0xde6f, 0xde1c, 0x151c, 0x14ec, 0x19eb, 0x193b, 0x3c3a,
+ 0x3c19, 0x1f19, 0x1ee5, 0x3ce4, 0x3c7f, 0x0c7f, 0x0b8e, 0x238d, 0x2372,
+ 0x3c71, 0x3c1c, 0x2f1c, 0x2e31, 0x7130, 0x7064, 0xd363, 0xd33f, 0x2f3f,
+ 0x2e92, 0x8791, 0x86fe, 0x3ffe, 0x3fe5, 0x11e5, 0x1121, 0xb520, 0xb4e5,
+ 0xede4, 0xed77, 0x5877, 0x586b, 0x116b, 0x110b, 0x620a, 0x61af, 0x1aaf,
+ 0x19c1, 0x3dc0, 0x3d8f, 0x0c8f, 0x0c7b, 0xfa7a, 0xf9fc, 0x5bfc, 0x5bb7,
+ 0xaab6, 0xa9f5, 0x40f5, 0x40aa, 0xbca9, 0xbbad, 0x33ad, 0x32ec, 0x94eb,
+ 0x94a5, 0xe0a4, 0xdfe2, 0xbae2, 0xba1d, 0x4e1d, 0x4dd1, 0x2bd1, 0x2b79,
+ 0xcf78, 0xceba, 0xcfb9, 0xcecf, 0x46cf, 0x4647, 0xcc46, 0xcb7b, 0xaf7b,
+ 0xaf1e, 0x4c1e, 0x4b7d, 0x597c, 0x5949, 0x4d49, 0x4ca7, 0x36a7, 0x369c,
+ 0xc89b, 0xc870, 0x4f70, 0x4f18, 0x5817, 0x576b, 0x846a, 0x8400, 0x4500,
+ 0x447f, 0xed7e, 0xed36, 0xa836, 0xa753, 0x2b53, 0x2a77, 0x5476, 0x5442,
+ 0xd641, 0xd55b, 0x625b, 0x6161, 0x9660, 0x962f, 0x7e2f, 0x7d86, 0x7286,
+ 0x7198, 0x0698, 0x05ff, 0x4cfe, 0x4cd1, 0x6ed0, 0x6eae, 0x60ae, 0x603d,
+ 0x093d, 0x092f, 0x6e2e, 0x6e1d, 0x9d1c, 0x9d07, 0x5c07, 0x5b37, 0xf036,
+ 0xefe6, 0x65e6, 0x65c3, 0x01c3, 0x00e0, 0x64df, 0x642c, 0x0f2c, 0x0f23,
+ 0x2622, 0x25f0, 0xbeef, 0xbdf6, 0xddf5, 0xdd82, 0xec81, 0xec21, 0x8621,
+ 0x8616, 0xfe15, 0xfd9c, 0x709c, 0x7051, 0x1e51, 0x1dce, 0xfdcd, 0xfda7,
+ 0x85a7, 0x855e, 0x5e5e, 0x5d77, 0x1f77, 0x1f4e, 0x774d, 0x7735, 0xf534,
+ 0xf4f3, 0x17f3, 0x17d5, 0x4bd4, 0x4b99, 0x8798, 0x8733, 0xb632, 0xb611,
+ 0x7611, 0x759f, 0xc39e, 0xc317, 0x6517, 0x6501, 0x5501, 0x5481, 0x1581,
+ 0x1536, 0xbd35, 0xbd19, 0xfb18, 0xfa9f, 0xda9f, 0xd9af, 0xf9ae, 0xf92e,
+ 0x262e, 0x25dc, 0x80db, 0x80c2, 0x12c2, 0x127b, 0x827a, 0x8272, 0x8d71,
+ 0x8d21, 0xab20, 0xaa4a, 0xfc49, 0xfb60, 0xcd60, 0xcc84, 0xf783, 0xf6cf,
+ 0x66cf, 0x66b0, 0xedaf, 0xed66, 0x6b66, 0x6b45, 0xe744, 0xe6a4, 0x31a4,
+ 0x3175, 0x3274, 0x3244, 0xc143, 0xc056, 0x4056, 0x3fee, 0x8eed, 0x8e80,
+ 0x9f7f, 0x9e89, 0xcf88, 0xced0, 0x8dd0, 0x8d57, 0x9856, 0x9855, 0xdc54,
+ 0xdc48, 0x4148, 0x413a, 0x3b3a, 0x3a47, 0x8a46, 0x898b, 0xf28a, 0xf1d2,
+ 0x40d2, 0x3fd5, 0xeed4, 0xee86, 0xff85, 0xff7b, 0xc27b, 0xc201, 0x8501,
+ 0x8444, 0x2344, 0x2344, 0x8143, 0x8090, 0x908f, 0x9072, 0x1972, 0x18f7,
+ 0xacf6, 0xacf5, 0x4bf5, 0x4b50, 0xa84f, 0xa774, 0xd273, 0xd19e, 0xdd9d,
+ 0xdce8, 0xb4e8, 0xb449, 0xaa49, 0xa9a6, 0x27a6, 0x2747, 0xdc46, 0xdc06,
+ 0xcd06, 0xcd01, 0xbf01, 0xbe89, 0xd188, 0xd0c9, 0xb9c9, 0xb8d3, 0x5ed3,
+ 0x5e49, 0xe148, 0xe04f, 0x9b4f, 0x9a8e, 0xc38d, 0xc372, 0x2672, 0x2606,
+ 0x1f06, 0x1e7e, 0x2b7d, 0x2ac1, 0x39c0, 0x38d6, 0x10d6, 0x10b7, 0x58b6,
+ 0x583c, 0xf83b, 0xf7ff, 0x29ff, 0x29c1, 0xd9c0, 0xd90e, 0xce0e, 0xcd3f,
+ 0xe83e, 0xe836, 0xc936, 0xc8ee, 0xc4ee, 0xc3f5, 0x8ef5, 0x8ecc, 0x79cc,
+ 0x790e, 0xf70d, 0xf677, 0x3477, 0x3422, 0x3022, 0x2fb6, 0x16b6, 0x1671,
+ 0xed70, 0xed65, 0x3765, 0x371c, 0x251c, 0x2421, 0x9720, 0x9705, 0x2205,
+ 0x217a, 0x4879, 0x480f, 0xec0e, 0xeb50, 0xa550, 0xa525, 0x6425, 0x6327,
+ 0x4227, 0x417a, 0x227a, 0x2205, 0x3b04, 0x3a74, 0xfd73, 0xfc92, 0x1d92,
+ 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6,
+ 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6,
+};
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 init_sums_no_overflow[] = {
+ 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3,
+ 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7,
+ 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb,
+ 0xffffdbdb, 0xffffdbd7, 0xffffd7d7, 0xffffd7d3, 0xffffd3d3, 0xffffd3cf,
+ 0xffffcfcf, 0xffffcfcb, 0xffffcbcb, 0xffffcbc7, 0xffffc7c7, 0xffffc7c3,
+ 0xffffc3c3, 0xffffc3bf, 0xffffbfbf, 0xffffbfbb, 0xffffbbbb, 0xffffbbb7,
+ 0xffffb7b7, 0xffffb7b3, 0xffffb3b3, 0xffffb3af, 0xffffafaf, 0xffffafab,
+ 0xffffabab, 0xffffaba7, 0xffffa7a7, 0xffffa7a3, 0xffffa3a3, 0xffffa39f,
+ 0xffff9f9f, 0xffff9f9b, 0xffff9b9b, 0xffff9b97, 0xffff9797, 0xffff9793,
+ 0xffff9393, 0xffff938f, 0xffff8f8f, 0xffff8f8b, 0xffff8b8b, 0xffff8b87,
+ 0xffff8787, 0xffff8783, 0xffff8383, 0xffff837f, 0xffff7f7f, 0xffff7f7b,
+ 0xffff7b7b, 0xffff7b77, 0xffff7777, 0xffff7773, 0xffff7373, 0xffff736f,
+ 0xffff6f6f, 0xffff6f6b, 0xffff6b6b, 0xffff6b67, 0xffff6767, 0xffff6763,
+ 0xffff6363, 0xffff635f, 0xffff5f5f, 0xffff5f5b, 0xffff5b5b, 0xffff5b57,
+ 0xffff5757, 0xffff5753, 0xffff5353, 0xffff534f, 0xffff4f4f, 0xffff4f4b,
+ 0xffff4b4b, 0xffff4b47, 0xffff4747, 0xffff4743, 0xffff4343, 0xffff433f,
+ 0xffff3f3f, 0xffff3f3b, 0xffff3b3b, 0xffff3b37, 0xffff3737, 0xffff3733,
+ 0xffff3333, 0xffff332f, 0xffff2f2f, 0xffff2f2b, 0xffff2b2b, 0xffff2b27,
+ 0xffff2727, 0xffff2723, 0xffff2323, 0xffff231f, 0xffff1f1f, 0xffff1f1b,
+ 0xffff1b1b, 0xffff1b17, 0xffff1717, 0xffff1713, 0xffff1313, 0xffff130f,
+ 0xffff0f0f, 0xffff0f0b, 0xffff0b0b, 0xffff0b07, 0xffff0707, 0xffff0703,
+ 0xffff0303, 0xffff02ff, 0xfffffefe, 0xfffffefa, 0xfffffafa, 0xfffffaf6,
+ 0xfffff6f6, 0xfffff6f2, 0xfffff2f2, 0xfffff2ee, 0xffffeeee, 0xffffeeea,
+ 0xffffeaea, 0xffffeae6, 0xffffe6e6, 0xffffe6e2, 0xffffe2e2, 0xffffe2de,
+ 0xffffdede, 0xffffdeda, 0xffffdada, 0xffffdad6, 0xffffd6d6, 0xffffd6d2,
+ 0xffffd2d2, 0xffffd2ce, 0xffffcece, 0xffffceca, 0xffffcaca, 0xffffcac6,
+ 0xffffc6c6, 0xffffc6c2, 0xffffc2c2, 0xffffc2be, 0xffffbebe, 0xffffbeba,
+ 0xffffbaba, 0xffffbab6, 0xffffb6b6, 0xffffb6b2, 0xffffb2b2, 0xffffb2ae,
+ 0xffffaeae, 0xffffaeaa, 0xffffaaaa, 0xffffaaa6, 0xffffa6a6, 0xffffa6a2,
+ 0xffffa2a2, 0xffffa29e, 0xffff9e9e, 0xffff9e9a, 0xffff9a9a, 0xffff9a96,
+ 0xffff9696, 0xffff9692, 0xffff9292, 0xffff928e, 0xffff8e8e, 0xffff8e8a,
+ 0xffff8a8a, 0xffff8a86, 0xffff8686, 0xffff8682, 0xffff8282, 0xffff827e,
+ 0xffff7e7e, 0xffff7e7a, 0xffff7a7a, 0xffff7a76, 0xffff7676, 0xffff7672,
+ 0xffff7272, 0xffff726e, 0xffff6e6e, 0xffff6e6a, 0xffff6a6a, 0xffff6a66,
+ 0xffff6666, 0xffff6662, 0xffff6262, 0xffff625e, 0xffff5e5e, 0xffff5e5a,
+ 0xffff5a5a, 0xffff5a56, 0xffff5656, 0xffff5652, 0xffff5252, 0xffff524e,
+ 0xffff4e4e, 0xffff4e4a, 0xffff4a4a, 0xffff4a46, 0xffff4646, 0xffff4642,
+ 0xffff4242, 0xffff423e, 0xffff3e3e, 0xffff3e3a, 0xffff3a3a, 0xffff3a36,
+ 0xffff3636, 0xffff3632, 0xffff3232, 0xffff322e, 0xffff2e2e, 0xffff2e2a,
+ 0xffff2a2a, 0xffff2a26, 0xffff2626, 0xffff2622, 0xffff2222, 0xffff221e,
+ 0xffff1e1e, 0xffff1e1a, 0xffff1a1a, 0xffff1a16, 0xffff1616, 0xffff1612,
+ 0xffff1212, 0xffff120e, 0xffff0e0e, 0xffff0e0a, 0xffff0a0a, 0xffff0a06,
+ 0xffff0606, 0xffff0602, 0xffff0202, 0xffff01fe, 0xfffffdfd, 0xfffffdf9,
+ 0xfffff9f9, 0xfffff9f5, 0xfffff5f5, 0xfffff5f1, 0xfffff1f1, 0xfffff1ed,
+ 0xffffeded, 0xffffede9, 0xffffe9e9, 0xffffe9e5, 0xffffe5e5, 0xffffe5e1,
+ 0xffffe1e1, 0xffffe1dd, 0xffffdddd, 0xffffddd9, 0xffffd9d9, 0xffffd9d5,
+ 0xffffd5d5, 0xffffd5d1, 0xffffd1d1, 0xffffd1cd, 0xffffcdcd, 0xffffcdc9,
+ 0xffffc9c9, 0xffffc9c5, 0xffffc5c5, 0xffffc5c1, 0xffffc1c1, 0xffffc1bd,
+ 0xffffbdbd, 0xffffbdb9, 0xffffb9b9, 0xffffb9b5, 0xffffb5b5, 0xffffb5b1,
+ 0xffffb1b1, 0xffffb1ad, 0xffffadad, 0xffffada9, 0xffffa9a9, 0xffffa9a5,
+ 0xffffa5a5, 0xffffa5a1, 0xffffa1a1, 0xffffa19d, 0xffff9d9d, 0xffff9d99,
+ 0xffff9999, 0xffff9995, 0xffff9595, 0xffff9591, 0xffff9191, 0xffff918d,
+ 0xffff8d8d, 0xffff8d89, 0xffff8989, 0xffff8985, 0xffff8585, 0xffff8581,
+ 0xffff8181, 0xffff817d, 0xffff7d7d, 0xffff7d79, 0xffff7979, 0xffff7975,
+ 0xffff7575, 0xffff7571, 0xffff7171, 0xffff716d, 0xffff6d6d, 0xffff6d69,
+ 0xffff6969, 0xffff6965, 0xffff6565, 0xffff6561, 0xffff6161, 0xffff615d,
+ 0xffff5d5d, 0xffff5d59, 0xffff5959, 0xffff5955, 0xffff5555, 0xffff5551,
+ 0xffff5151, 0xffff514d, 0xffff4d4d, 0xffff4d49, 0xffff4949, 0xffff4945,
+ 0xffff4545, 0xffff4541, 0xffff4141, 0xffff413d, 0xffff3d3d, 0xffff3d39,
+ 0xffff3939, 0xffff3935, 0xffff3535, 0xffff3531, 0xffff3131, 0xffff312d,
+ 0xffff2d2d, 0xffff2d29, 0xffff2929, 0xffff2925, 0xffff2525, 0xffff2521,
+ 0xffff2121, 0xffff211d, 0xffff1d1d, 0xffff1d19, 0xffff1919, 0xffff1915,
+ 0xffff1515, 0xffff1511, 0xffff1111, 0xffff110d, 0xffff0d0d, 0xffff0d09,
+ 0xffff0909, 0xffff0905, 0xffff0505, 0xffff0501, 0xffff0101, 0xffff00fd,
+ 0xfffffcfc, 0xfffffcf8, 0xfffff8f8, 0xfffff8f4, 0xfffff4f4, 0xfffff4f0,
+ 0xfffff0f0, 0xfffff0ec, 0xffffecec, 0xffffece8, 0xffffe8e8, 0xffffe8e4,
+ 0xffffe4e4, 0xffffe4e0, 0xffffe0e0, 0xffffe0dc, 0xffffdcdc, 0xffffdcd8,
+ 0xffffd8d8, 0xffffd8d4, 0xffffd4d4, 0xffffd4d0, 0xffffd0d0, 0xffffd0cc,
+ 0xffffcccc, 0xffffccc8, 0xffffc8c8, 0xffffc8c4, 0xffffc4c4, 0xffffc4c0,
+ 0xffffc0c0, 0xffffc0bc, 0xffffbcbc, 0xffffbcb8, 0xffffb8b8, 0xffffb8b4,
+ 0xffffb4b4, 0xffffb4b0, 0xffffb0b0, 0xffffb0ac, 0xffffacac, 0xffffaca8,
+ 0xffffa8a8, 0xffffa8a4, 0xffffa4a4, 0xffffa4a0, 0xffffa0a0, 0xffffa09c,
+ 0xffff9c9c, 0xffff9c98, 0xffff9898, 0xffff9894, 0xffff9494, 0xffff9490,
+ 0xffff9090, 0xffff908c, 0xffff8c8c, 0xffff8c88, 0xffff8888, 0xffff8884,
+ 0xffff8484, 0xffff8480, 0xffff8080, 0xffff807c, 0xffff7c7c, 0xffff7c78,
+ 0xffff7878, 0xffff7874, 0xffff7474, 0xffff7470, 0xffff7070, 0xffff706c,
+ 0xffff6c6c, 0xffff6c68, 0xffff6868, 0xffff6864, 0xffff6464, 0xffff6460,
+ 0xffff6060, 0xffff605c, 0xffff5c5c, 0xffff5c58, 0xffff5858, 0xffff5854,
+ 0xffff5454, 0xffff5450, 0xffff5050, 0xffff504c, 0xffff4c4c, 0xffff4c48,
+ 0xffff4848, 0xffff4844, 0xffff4444, 0xffff4440, 0xffff4040, 0xffff403c,
+ 0xffff3c3c, 0xffff3c38, 0xffff3838, 0xffff3834, 0xffff3434, 0xffff3430,
+ 0xffff3030, 0xffff302c, 0xffff2c2c, 0xffff2c28, 0xffff2828, 0xffff2824,
+ 0xffff2424, 0xffff2420, 0xffff2020, 0xffff201c, 0xffff1c1c, 0xffff1c18,
+ 0xffff1818, 0xffff1814, 0xffff1414, 0xffff1410, 0xffff1010, 0xffff100c,
+ 0xffff0c0c, 0xffff0c08, 0xffff0808, 0xffff0804, 0xffff0404, 0xffff0400,
+ 0xffff0000, 0xfffffffb,
+};
+
+static const u16 expected_csum_ipv6_magic[] = {
+ 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0,
+ 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
+ 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
+ 0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682,
+ 0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e,
+ 0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0,
+ 0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2, 0xdb0e,
+ 0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9,
+ 0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e, 0xe044, 0x3402,
+ 0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8, 0x5e00, 0x47c6, 0x3059,
+ 0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc,
+ 0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95, 0x9aed, 0x6ff,
+ 0x5136, 0x2741, 0x660e, 0x8b80, 0xf71, 0xa263, 0x88af, 0x7a73, 0x3c37,
+ 0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80, 0xcd55, 0x6e6,
+ 0x6434, 0x127, 0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b,
+ 0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498, 0x436, 0xb917,
+ 0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168,
+ 0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6,
+ 0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632,
+ 0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb,
+ 0xc49c, 0xe965, 0x4eff, 0x5a, 0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a,
+ 0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988,
+ 0x3845, 0x1014
+};
+
+static const u16 expected_fast_csum[] = {
+ 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187,
+ 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
+ 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
+ 0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c,
+ 0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da,
+ 0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede, 0x790f,
+ 0x579, 0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7, 0x220d,
+ 0xf922, 0xbf9, 0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d, 0x2,
+ 0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50,
+ 0xd55e, 0xcad5, 0xf3c9, 0x8f4, 0x4409, 0x5043, 0x5b50, 0x55b, 0x2205,
+ 0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5,
+ 0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4,
+ 0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551,
+ 0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c,
+ 0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57,
+ 0xe54a, 0x20e5, 0x21, 0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca,
+ 0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81,
+ 0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5,
+ 0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5,
+ 0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373,
+ 0x2503, 0xc22a, 0x3c2, 0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8,
+ 0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116,
+ 0x23f1, 0xcd23, 0xece, 0x660f, 0x4866, 0x6a48, 0x716a, 0xee71, 0xa2ee,
+ 0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0,
+ 0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d,
+ 0xa9b7, 0xcaa, 0xeb0c, 0x4eb, 0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9,
+ 0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556,
+ 0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24,
+ 0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137,
+ 0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0,
+ 0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3, 0x1908, 0x2118, 0xc21, 0x990c,
+ 0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634,
+ 0x3f26, 0x793e, 0xc79, 0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4,
+ 0x22b7, 0xa122, 0xa1, 0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f,
+ 0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26,
+ 0xd5aa, 0x7d5, 0x9607, 0x96, 0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd,
+ 0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39,
+ 0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789,
+ 0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58,
+ 0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2,
+ 0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e,
+ 0x3ad3, 0xf3a, 0x870e, 0xc287, 0x53c3, 0xc54, 0x5b0c, 0x7d5a, 0x797d,
+ 0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298,
+ 0xb1f2, 0x69b1, 0xf969, 0xef9, 0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224,
+ 0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47,
+ 0xe6b, 0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d,
+ 0xd0c6, 0x61d1, 0x3a62, 0xc3b, 0x150c, 0x1715, 0x4517, 0x5345, 0x3954,
+ 0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f,
+ 0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335,
+ 0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9,
+ 0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef,
+ 0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77, 0xb10a, 0xbfb1, 0x58bf, 0x5258,
+ 0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8,
+ 0xa99b, 0x52a9, 0x2f53, 0xc30, 0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53,
+ 0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35,
+ 0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156,
+ 0xf721, 0x4ef6, 0x14f, 0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898,
+ 0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21,
+ 0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6,
+ 0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1,
+ 0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef,
+ 0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7, 0x9507, 0xfd94,
+ 0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064,
+ 0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24,
+ 0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e,
+ 0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70,
+ 0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8,
+ 0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93, 0x3500, 0x7134, 0x9071, 0xfa8f,
+ 0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d,
+ 0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447,
+ 0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 0x9ec9,
+ 0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded,
+ 0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee,
+ 0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1,
+ 0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8,
+ 0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141,
+ 0x132, 0xe901, 0x8e9, 0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139,
+ 0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf,
+ 0xc1ce, 0x5c2, 0x3805, 0x6038, 0x5960, 0xd359, 0xdd3, 0xbe0d, 0xafbd,
+ 0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3,
+ 0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653,
+ 0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77,
+ 0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23,
+ 0x25f, 0x1d02, 0x71d, 0x8206, 0xc82, 0x180c, 0x3018, 0x4b30, 0x4b,
+ 0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d,
+ 0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984,
+ 0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056,
+ 0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1,
+ 0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc, 0xb40b, 0x1ab4,
+ 0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb, 0x3c01,
+ 0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38,
+ 0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9,
+ 0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e,
+ 0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65, 0x870b, 0x487,
+ 0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a, 0x7a07, 0x4e7a, 0xfc4e,
+ 0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf,
+ 0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79,
+ 0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a,
+ 0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564,
+ 0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e, 0xee0d, 0x4ced, 0xce4c,
+ 0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639,
+ 0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7,
+ 0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9, 0xc504,
+ 0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd,
+ 0x5718, 0x958, 0xa609, 0x41a5, 0x5842, 0x159, 0x9f01, 0x269f, 0x5a26,
+ 0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4,
+ 0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df, 0xab0a, 0xa5ab, 0x9a6, 0xba09,
+ 0x9ab9, 0xad9a, 0x5ae, 0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5,
+ 0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780, 0x7207, 0x2472, 0x8a23,
+ 0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789,
+ 0xb28, 0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379, 0x5703,
+ 0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b,
+ 0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63,
+ 0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d,
+ 0x4fc, 0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283,
+ 0x593, 0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18,
+ 0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491, 0xbb04,
+ 0x23bb, 0x4124, 0xd41, 0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2,
+ 0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073,
+ 0x9e10, 0x139e, 0xea13, 0xcde9, 0x3ecd, 0x883f, 0xf89, 0x180f, 0x2a18,
+ 0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e,
+ 0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c,
+ 0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea,
+ 0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2,
+ 0x2c70, 0x232d, 0xf623, 0x4f6, 0x905, 0x7509, 0xd675, 0x28d7, 0x9428,
+ 0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed,
+ 0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f,
+ 0x83c4, 0xa84, 0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4,
+ 0xed1b, 0xd6ed, 0xe0d6, 0x5e1, 0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99,
+ 0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a,
+ 0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7,
+ 0x109, 0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5,
+ 0xcb0a, 0x4cb, 0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763,
+ 0xc17, 0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb,
+ 0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50,
+ 0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b,
+ 0x411, 0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188,
+ 0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47,
+ 0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6,
+ 0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a,
+ 0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0,
+ 0x2a7, 0xe702, 0xf5e6, 0x9af5, 0xf9b, 0x810f, 0x8080, 0x180, 0x1702,
+ 0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b,
+ 0xfc7, 0x5510, 0x5555, 0x7655, 0x177, 0x2b02, 0x6f2a, 0xb70, 0x9f0b,
+ 0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186,
+ 0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d, 0x530e, 0x3153, 0xa531,
+ 0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538, 0x5,
+ 0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3,
+ 0xb2a1, 0x5b2, 0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72,
+ 0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0,
+ 0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb,
+ 0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985,
+ 0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f,
+ 0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e,
+ 0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39,
+ 0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb, 0x9104, 0x9d90,
+ 0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e,
+ 0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d,
+ 0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b,
+ 0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57,
+ 0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3,
+ 0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f, 0x530a, 0x3f52, 0xa33e,
+ 0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b,
+ 0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837,
+ 0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c,
+ 0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9,
+ 0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a, 0x900e,
+ 0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 0x1196, 0x8911,
+ 0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e,
+ 0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10,
+ 0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180,
+ 0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df,
+ 0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474,
+ 0xb714, 0x7db6, 0x857d, 0xa85, 0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278,
+ 0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c,
+ 0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6,
+ 0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f,
+ 0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773, 0x9d07, 0xe99c,
+ 0x27ea, 0x2028, 0xc20, 0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2,
+ 0x2449, 0x7024, 0x570, 0x3e05, 0xd3e, 0xf60c, 0xbbf5, 0x69bb, 0x3f6a,
+ 0x740, 0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf,
+ 0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993,
+ 0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f,
+ 0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413,
+ 0x5a5, 0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f,
+ 0xe48c, 0xd0e4, 0x4d1, 0xd104, 0xe4d0, 0xcee4, 0xfcf, 0x480f, 0xa447,
+ 0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469,
+ 0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd,
+ 0xb3ab, 0xcb4, 0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83,
+ 0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be,
+ 0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1,
+ 0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a,
+ 0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe,
+ 0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8,
+ 0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca,
+ 0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e, 0x7b00, 0x6a7b, 0xa46a,
+ 0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab,
+ 0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf,
+ 0xeb1c, 0xd1ea, 0x4ad2, 0xd4b, 0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13,
+ 0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf,
+ 0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479,
+ 0xb7a4
+};
+
+static u8 tmp_buf[TEST_BUFLEN];
+
+#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs)
+
+static __sum16 to_sum16(u16 x)
+{
+ return (__force __sum16)le16_to_cpu((__force __le16)x);
+}
+
+/* This function swaps the bytes inside each half of a __wsum */
+static __wsum to_wsum(u32 x)
+{
+ u16 hi = le16_to_cpu((__force __le16)(x >> 16));
+ u16 lo = le16_to_cpu((__force __le16)x);
+
+ return (__force __wsum)((hi << 16) | lo);
+}
+
+static void assert_setup_correct(struct kunit *test)
+{
+ CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN);
+ CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]),
+ MAX_LEN);
+ CHECK_EQ(sizeof(init_sums_no_overflow) /
+ sizeof(init_sums_no_overflow[0]),
+ MAX_LEN);
+}
+
+/*
+ * Test with randomized input (predetermined random data with known results).
+ */
+static void test_csum_fixed_random_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ memcpy(&tmp_buf[align], random_buf,
+ min(MAX_LEN, TEST_BUFLEN - align));
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Test the precomputed random input.
+ */
+ sum = to_wsum(random_init_sum);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(expected_results[len]);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * All ones input test. If there are any missing carry operations, it fails.
+ */
+static void test_csum_all_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0xff, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * All carries from input and initial sum.
+ */
+ sum = to_wsum(0xffffffff);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16((len & 1) ? 0xff00 : 0);
+ CHECK_EQ(result, expec);
+
+ /*
+ * All carries from input.
+ */
+ sum = 0;
+ result = full_csum(&tmp_buf[align], len, sum);
+ if (len & 1)
+ expec = to_sum16(0xff00);
+ else if (len)
+ expec = 0;
+ else
+ expec = to_sum16(0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * Test with input that alone doesn't cause any carries. Selecting the
+ * maximum initial sum lets us verify that no carries occur where there
+ * shouldn't be any.
+ */
+static void test_csum_no_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0x4, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Expect no carries.
+ */
+ sum = to_wsum(init_sums_no_overflow[len]);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = 0;
+ CHECK_EQ(result, expec);
+
+ /*
+ * Expect one carry.
+ */
+ sum = to_wsum(init_sums_no_overflow[len] + 1);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(len ? 0xfffe : 0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+static void test_ip_fast_csum(struct kunit *test)
+{
+ __sum16 csum_result;
+ u16 expected;
+
+ for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
+ for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
+ csum_result = ip_fast_csum(random_buf + index, len);
+ expected =
+ expected_fast_csum[(len - IPv4_MIN_WORDS) *
+ NUM_IP_FAST_CSUM_TESTS +
+ index];
+ CHECK_EQ(to_sum16(expected), csum_result);
+ }
+ }
+}
+
+static void test_csum_ipv6_magic(struct kunit *test)
+{
+#if defined(CONFIG_NET)
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ unsigned int len;
+ unsigned char proto;
+ __wsum csum;
+
+ const int daddr_offset = sizeof(struct in6_addr);
+ const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int);
+ const int csum_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int) + sizeof(char);
+
+ for (int i = 0; i < NUM_IPv6_TESTS; i++) {
+ saddr = (const struct in6_addr *)(random_buf + i);
+ daddr = (const struct in6_addr *)(random_buf + i +
+ daddr_offset);
+ len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
+ proto = *(random_buf + i + proto_offset);
+ csum = *(__wsum *)(random_buf + i + csum_offset);
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
+ csum_ipv6_magic(saddr, daddr, len, proto, csum));
+ }
+#endif /* CONFIG_NET */
+}
+
+static struct kunit_case __refdata checksum_test_cases[] = {
+ KUNIT_CASE(test_csum_fixed_random_inputs),
+ KUNIT_CASE(test_csum_all_carry_inputs),
+ KUNIT_CASE(test_csum_no_carry_inputs),
+ KUNIT_CASE(test_ip_fast_csum),
+ KUNIT_CASE(test_csum_ipv6_magic),
+ {}
+};
+
+static struct kunit_suite checksum_test_suite = {
+ .name = "checksum",
+ .test_cases = checksum_test_cases,
+};
+
+kunit_test_suites(&checksum_test_suite);
+
+MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>");
+MODULE_LICENSE("GPL");
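
All expectations above are folded 16-bit sums; full_csum() is csum_fold(csum_partial(...)). For orientation, the fold step is an end-around-carry add of the two 16-bit halves followed by a bitwise NOT, roughly as in this arch-independent sketch (not code from this patch):

static __sum16 fold_sketch(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the possible carry */
	return (__force __sum16)~sum;
}

That folding behaviour is consistent with the all-carry tests above, which expect 0xff00 for odd lengths and 0 for non-empty even lengths.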
diff --git a/lib/closure.c b/lib/closure.c
new file mode 100644
index 000000000000..c16540552d61
--- /dev/null
+++ b/lib/closure.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/closure.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ int r = flags & CLOSURE_REMAINING_MASK;
+
+ BUG_ON(flags & CLOSURE_GUARD_MASK);
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+
+ if (!r) {
+ smp_acquire__after_ctrl_dep();
+
+ cl->closure_get_happened = false;
+
+ if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
+ atomic_set(&cl->remaining,
+ CLOSURE_REMAINING_INITIALIZER);
+ closure_queue(cl);
+ } else {
+ struct closure *parent = cl->parent;
+ closure_fn *destructor = cl->fn;
+
+ closure_debug_destroy(cl);
+
+ if (destructor)
+ destructor(&cl->work);
+
+ if (parent)
+ closure_put(parent);
+ }
+ }
+}
+
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+ closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
+}
+EXPORT_SYMBOL(closure_sub);
+
+/*
+ * closure_put - decrement a closure's refcount
+ */
+void closure_put(struct closure *cl)
+{
+ closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
+}
+EXPORT_SYMBOL(closure_put);
+
+/*
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
+void __closure_wake_up(struct closure_waitlist *wait_list)
+{
+ struct llist_node *list;
+ struct closure *cl, *t;
+ struct llist_node *reverse = NULL;
+
+ list = llist_del_all(&wait_list->list);
+
+ /* We first reverse the list to preserve FIFO ordering and fairness */
+ reverse = llist_reverse_order(list);
+
+ /* Then do the wakeups */
+ llist_for_each_entry_safe(cl, t, reverse, list) {
+ closure_set_waiting(cl, 0);
+ closure_sub(cl, CLOSURE_WAITING + 1);
+ }
+}
+EXPORT_SYMBOL(__closure_wake_up);
+
+/**
+ * closure_wait - add a closure to a waitlist
+ * @waitlist: will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ return false;
+
+ cl->closure_get_happened = true;
+ closure_set_waiting(cl, _RET_IP_);
+ atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
+ llist_add(&cl->list, &waitlist->list);
+
+ return true;
+}
+EXPORT_SYMBOL(closure_wait);
+
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static CLOSURE_CALLBACK(closure_sync_fn)
+{
+ struct closure *cl = container_of(ws, struct closure, work);
+ struct closure_syncer *s = cl->s;
+ struct task_struct *p;
+
+ rcu_read_lock();
+ p = READ_ONCE(s->task);
+ s->done = 1;
+ wake_up_process(p);
+ rcu_read_unlock();
+}
+
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL(__closure_sync);
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+static LIST_HEAD(closure_list);
+static DEFINE_SPINLOCK(closure_list_lock);
+
+void closure_debug_create(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_ALIVE;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_add(&cl->all, &closure_list);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL(closure_debug_create);
+
+void closure_debug_destroy(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_DEAD;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_del(&cl->all);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL(closure_debug_destroy);
+
+static int debug_show(struct seq_file *f, void *data)
+{
+ struct closure *cl;
+
+ spin_lock_irq(&closure_list_lock);
+
+ list_for_each_entry(cl, &closure_list, all) {
+ int r = atomic_read(&cl->remaining);
+
+ seq_printf(f, "%p: %pS -> %pS p %p r %i ",
+ cl, (void *) cl->ip, cl->fn, cl->parent,
+ r & CLOSURE_REMAINING_MASK);
+
+ seq_printf(f, "%s%s\n",
+ test_bit(WORK_STRUCT_PENDING_BIT,
+ work_data_bits(&cl->work)) ? "Q" : "",
+ r & CLOSURE_RUNNING ? "R" : "");
+
+ if (r & CLOSURE_WAITING)
+ seq_printf(f, " W %pS\n",
+ (void *) cl->waiting_on);
+
+ seq_puts(f, "\n");
+ }
+
+ spin_unlock_irq(&closure_list_lock);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(debug);
+
+static int __init closure_debug_init(void)
+{
+ debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
+ return 0;
+}
+late_initcall(closure_debug_init)
+
+#endif
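
A closure's lifetime is: initialize it, take one reference per outstanding asynchronous operation, drop the reference when the operation completes, then either continue at a callback or wait synchronously. A hedged usage sketch against the <linux/closure.h> interfaces (submit_request() and req are stand-ins for some asynchronous submission path whose completion is assumed to call closure_put()):

	struct closure cl;

	closure_init_stack(&cl);	/* on-stack closure, starts with one ref */

	closure_get(&cl);		/* one extra ref per in-flight request */
	submit_request(req, &cl);	/* assumed: completion calls closure_put(&cl) */

	closure_sync(&cl);		/* wait for the outstanding refs to drop */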
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 0d3a686b5ba2..fb8c0c5c2bd2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
}
EXPORT_SYMBOL(__clzsi2);
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
{
- return 32 - fls((int)val);
+ return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
{
- return __ffs((u32)val);
+ return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
- return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
- return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
diff --git a/lib/cmdline.c b/lib/cmdline.c
index fbb9981a04a4..90ed997d9570 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -35,25 +35,37 @@ static int get_range(char **str, int *pint, int n)
/**
* get_option - Parse integer from an option string
* @str: option string
- * @pint: (output) integer value parsed from @str
+ * @pint: (optional output) integer value parsed from @str
*
* Read an int from an option string; if available accept a subsequent
* comma as well.
*
+ * When @pint is NULL the function can be used as a validator of
+ * the current option in the string.
+ *
* Return values:
* 0 - no int in string
* 1 - int found, no subsequent comma
* 2 - int found including a subsequent comma
* 3 - hyphen found to denote a range
+ *
+ * A leading hyphen without an integer is treated as the no-integer case,
+ * but we consume it for the sake of simplification.
*/
int get_option(char **str, int *pint)
{
char *cur = *str;
+ int value;
if (!cur || !(*cur))
return 0;
- *pint = simple_strtol(cur, str, 0);
+ if (*cur == '-')
+ value = -simple_strtoull(++cur, str, 0);
+ else
+ value = simple_strtoull(cur, str, 0);
+ if (pint)
+ *pint = value;
if (cur == *str)
return 0;
if (**str == ',') {
@@ -71,7 +83,7 @@ EXPORT_SYMBOL(get_option);
* get_options - Parse a string into a list of integers
* @str: String to be parsed
* @nints: size of integer array
- * @ints: integer array
+ * @ints: integer array (must have room for at least one element)
*
* This function parses a string containing a comma-separated
* list of integers, a hyphen-separated range of _positive_ integers,
@@ -79,6 +91,14 @@ EXPORT_SYMBOL(get_option);
* full, or when no more numbers can be retrieved from the
* string.
*
+ * When @nints is 0, the function just validates the given @str and
+ * returns the number of parseable integers as described below.
+ *
+ * Returns:
+ *
+ * The first element of @ints is filled with the number of collected
+ * integers; the remaining elements hold the integers parsed from @str.
+ *
* Return value is the character in the string which caused
* the parse to end (typically a null terminator, if @str is
* completely parseable).
@@ -86,15 +106,20 @@ EXPORT_SYMBOL(get_option);
char *get_options(const char *str, int nints, int *ints)
{
+ bool validate = (nints == 0);
int res, i = 1;
- while (i < nints) {
- res = get_option((char **)&str, ints + i);
+ while (i < nints || validate) {
+ int *pint = validate ? ints : ints + i;
+
+ res = get_option((char **)&str, pint);
if (res == 0)
break;
if (res == 3) {
+ int n = validate ? 0 : nints - i;
int range_nums;
- range_nums = get_range((char **)&str, ints + i, nints - i);
+
+ range_nums = get_range((char **)&str, pint, n);
if (range_nums < 0)
break;
/*
@@ -132,27 +157,28 @@ unsigned long long memparse(const char *ptr, char **retptr)
case 'E':
case 'e':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'P':
case 'p':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'T':
case 't':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'G':
case 'g':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'M':
case 'm':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'K':
case 'k':
ret <<= 10;
endptr++;
+ fallthrough;
default:
break;
}
@@ -202,7 +228,6 @@ char *next_arg(char *args, char **param, char **val)
{
unsigned int i, equals = 0;
int in_quote = 0, quoted = 0;
- char *next;
if (*args == '"') {
args++;
@@ -235,15 +260,16 @@ char *next_arg(char *args, char **param, char **val)
args[i-1] = '\0';
}
}
- if (quoted && args[i-1] == '"')
+ if (quoted && i > 0 && args[i-1] == '"')
args[i-1] = '\0';
if (args[i]) {
args[i] = '\0';
- next = args + i + 1;
+ args += i + 1;
} else
- next = args + i;
+ args += i;
/* Chew up trailing spaces. */
- return skip_spaces(next);
+ return skip_spaces(args);
}
+EXPORT_SYMBOL(next_arg);
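
The new validate mode makes a two-pass pattern possible: call get_options() with @nints == 0 to count the integers, then size the array and parse for real. A sketch under that assumption (allocation-failure handling trimmed):

	int count[1];
	int *ints;

	get_options(str, 0, count);	/* validate only: count[0] = number of ints */
	ints = kcalloc(count[0] + 1, sizeof(*ints), GFP_KERNEL);
	if (ints)
		get_options(str, count[0] + 1, ints);	/* ints[0] = n, ints[1..n] = values */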
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
new file mode 100644
index 000000000000..d4572dbc9145
--- /dev/null
+++ b/lib/cmdline_kunit.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for the API provided by cmdline.c
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const char *cmdline_test_strings[] = {
+ "\"\"", "" , "=" , "\"-", "," , "-," , ",-" , "-" ,
+ "+," , "--", ",,", "''" , "\"\",", "\",\"", "-\"\"", "\"",
+};
+
+static const int cmdline_test_values[] = {
+ 1, 1, 1, 1, 2, 3, 2, 3,
+ 1, 3, 2, 1, 1, 1, 3, 1,
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_strings) == ARRAY_SIZE(cmdline_test_values));
+
+static const char *cmdline_test_range_strings[] = {
+ "-7" , "--7" , "-1-2" , "7--9",
+ "7-" , "-7--9", "7-9," , "9-7" ,
+ "5-a", "a-5" , "5-8" , ",8-5",
+ "+,1", "-,4" , "-3,0-1,6", "4,-" ,
+ " +2", " -9" , "0-1,-3,6", "- 9" ,
+};
+
+static const int cmdline_test_range_values[][16] = {
+ { 1, -7, }, { 0, -0, }, { 4, -1, 0, +1, 2, }, { 0, 7, },
+ { 0, +7, }, { 0, -7, }, { 3, +7, 8, +9, 0, }, { 0, 9, },
+ { 0, +5, }, { 0, -0, }, { 4, +5, 6, +7, 8, }, { 0, 0, },
+ { 0, +0, }, { 0, -0, }, { 4, -3, 0, +1, 6, }, { 1, 4, },
+ { 0, +0, }, { 0, -0, }, { 4, +0, 1, -3, 6, }, { 0, 0, },
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_range_strings) == ARRAY_SIZE(cmdline_test_range_values));
+
+static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)
+{
+ const char *fmt = "Pattern: %s";
+ const char *out = in;
+ int dummy;
+ int ret;
+
+ ret = get_option((char **)&out, &dummy);
+
+ KUNIT_EXPECT_EQ_MSG(test, ret, rc, fmt, in);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, out, in + offset, fmt, in);
+}
+
+static void cmdline_test_noint(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = 0;
+ int offset;
+
+ /* Only a leading '-' will advance the pointer */
+ offset = !!(*str == '-');
+ cmdline_do_one_test(test, str, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_lead_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = cmdline_test_values[i];
+ int offset;
+
+ sprintf(in, "%u%s", get_random_u8(), str);
+ /* Only first '-' after the number will advance the pointer */
+ offset = strlen(in) - strlen(str) + !!(rc == 2);
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_tail_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ /* When str is "" or "-", the result will be a valid integer */
+ int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1;
+ int offset;
+
+ sprintf(in, "%s%u", str, get_random_u8());
+ /*
+ * Only first and leading '-' not followed by integer
+ * will advance the pointer.
+ */
+ offset = rc ? strlen(in) : !!(*str == '-');
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ unsigned int n, const int *e)
+{
+ unsigned int i;
+ int r[16];
+ int *p;
+
+ memset(r, 0, sizeof(r));
+ get_options(in, ARRAY_SIZE(r), r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (parsed) expected %d numbers, got %d",
+ n, e[0], r[0]);
+ for (i = 1; i < ARRAY_SIZE(r); i++)
+ KUNIT_EXPECT_EQ_MSG(test, r[i], e[i], "in test %u at %u", n, i);
+
+ memset(r, 0, sizeof(r));
+ get_options(in, 0, r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (validated) expected %d numbers, got %d",
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
+}
+
+static void cmdline_test_range(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_range_strings[i];
+ const int *e = cmdline_test_range_values[i];
+
+ cmdline_do_one_range_test(test, str, i, e);
+ } while (++i < ARRAY_SIZE(cmdline_test_range_strings));
+}
+
+static struct kunit_case cmdline_test_cases[] = {
+ KUNIT_CASE(cmdline_test_noint),
+ KUNIT_CASE(cmdline_test_lead_int),
+ KUNIT_CASE(cmdline_test_tail_int),
+ KUNIT_CASE(cmdline_test_range),
+ {}
+};
+
+static struct kunit_suite cmdline_test_suite = {
+ .name = "cmdline",
+ .test_cases = cmdline_test_cases,
+};
+kunit_test_suite(cmdline_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
index 77eabad69b4a..3d6b8996f027 100644
--- a/lib/compat_audit.c
+++ b/lib/compat_audit.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/audit_arch.h>
#include <asm/unistd32.h>
unsigned compat_dir_class[] = {
@@ -33,19 +34,23 @@ int audit_classify_compat_syscall(int abi, unsigned syscall)
switch (syscall) {
#ifdef __NR_open
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
#endif
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+#ifdef __NR_openat2
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
+#endif
default:
- return 1;
+ return AUDITSC_COMPAT;
}
}
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 075f3788bbe4..4c348670da31 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
}
#endif
+static int get_free_index(struct cpu_rmap *rmap)
+{
+ int i;
+
+ for (i = 0; i < rmap->size; i++)
+ if (!rmap->obj[i])
+ return i;
+
+ return -ENOSPC;
+}
+
/**
* cpu_rmap_add - add object to a rmap
* @rmap: CPU rmap allocated with alloc_cpu_rmap()
* @obj: Object to add to rmap
*
- * Return index of object.
+ * Return index of object or -ENOSPC if no free entry was found
*/
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
- u16 index;
+ int index = get_free_index(rmap);
+
+ if (index < 0)
+ return index;
- BUG_ON(rmap->used >= rmap->size);
- index = rmap->used++;
rmap->obj[index] = obj;
return index;
}
@@ -230,9 +242,10 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
if (!rmap)
return;
- for (index = 0; index < rmap->used; index++) {
+ for (index = 0; index < rmap->size; index++) {
glue = rmap->obj[index];
- irq_set_affinity_notifier(glue->notify.irq, NULL);
+ if (glue)
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
@@ -255,7 +268,7 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
rc = cpu_rmap_update(glue->rmap, glue->index, mask);
if (rc)
- pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+ pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
/**
@@ -267,11 +280,23 @@ static void irq_cpu_rmap_release(struct kref *ref)
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
+ glue->rmap->obj[glue->index] = NULL;
cpu_rmap_put(glue->rmap);
kfree(glue);
}
/**
+ * irq_cpu_rmap_remove - remove an IRQ from a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ */
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq)
+{
+ return irq_set_affinity_notifier(irq, NULL);
+}
+EXPORT_SYMBOL(irq_cpu_rmap_remove);
+
+/**
* irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
* @rmap: The reverse-map
* @irq: The IRQ number
@@ -293,12 +318,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
glue->notify.release = irq_cpu_rmap_release;
glue->rmap = rmap;
cpu_rmap_get(rmap);
- glue->index = cpu_rmap_add(rmap, glue);
+ rc = cpu_rmap_add(rmap, glue);
+ if (rc < 0)
+ goto err_add;
+
+ glue->index = rc;
rc = irq_set_affinity_notifier(irq, &glue->notify);
- if (rc) {
- cpu_rmap_put(glue->rmap);
- kfree(glue);
- }
+ if (rc)
+ goto err_set;
+
+ return rc;
+
+err_set:
+ rmap->obj[glue->index] = NULL;
+err_add:
+ cpu_rmap_put(glue->rmap);
+ kfree(glue);
return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
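A minimal sketch of how a driver might use the reworked add/remove pair (the helper name and calling context are hypothetical): cpu_rmap_add() can now fail with -ENOSPC instead of hitting a BUG_ON(), and a single IRQ can be detached without freeing the whole map.

    /* hypothetical helper: attach one queue IRQ, later detach just that IRQ */
    static int example_attach_queue_irq(struct cpu_rmap *rmap, int irq)
    {
        int rc;

        rc = irq_cpu_rmap_add(rmap, irq);   /* may now return -ENOSPC */
        if (rc)
            return rc;

        /* teardown path for this queue only; the rmap itself stays usable */
        return irq_cpu_rmap_remove(rmap, irq);
    }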
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 0cb672eb107c..e77ee9d46f71 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -8,75 +8,20 @@
#include <linux/numa.h>
/**
- * cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
-unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
- return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next);
-
-/**
- * cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @src1p: the first cpumask pointer
- * @src2p: the second cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set in both.
- */
-int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
-{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
- return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next_and);
-
-/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
- * @mask: the cpumask to search
- * @cpu: the cpu to ignore.
- *
- * Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
- */
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
-{
- unsigned int i;
-
- cpumask_check(cpu);
- for_each_cpu(i, mask)
- if (i != cpu)
- break;
- return i;
-}
-EXPORT_SYMBOL(cpumask_any_but);
-
-/**
* cpumask_next_wrap - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search
* @mask: the cpumask pointer
* @start: the start point of the iteration
* @wrap: assume @n crossing @start terminates the iteration
*
- * Returns >= nr_cpu_ids on completion
+ * Return: >= nr_cpu_ids on completion
*
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
*/
-int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
- int next;
+ unsigned int next;
again:
next = cpumask_next(n, mask);
@@ -100,10 +45,12 @@ EXPORT_SYMBOL(cpumask_next_wrap);
* alloc_cpumask_var_node - allocate a struct cpumask on a given node
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
+ * @node: memory node from which to allocate or %NUMA_NO_NODE
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>)
- * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * Return: TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
@@ -125,34 +72,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
-{
- return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var_node);
-
-/**
- * alloc_cpumask_var - allocate a struct cpumask
- * @mask: pointer to cpumask_var_t where the cpumask is returned
- * @flags: GFP_ flags
- *
- * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>).
- *
- * See alloc_cpumask_var_node.
- */
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
- return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_cpumask_var);
-
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
- return alloc_cpumask_var(mask, flags | __GFP_ZERO);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var);
-
/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -188,47 +107,98 @@ EXPORT_SYMBOL(free_cpumask_var);
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
- memblock_free_early(__pa(mask), cpumask_size());
+ memblock_free(mask, cpumask_size());
}
#endif
/**
- * cpumask_local_spread - select the i'th cpu with local numa cpu's first
+ * cpumask_local_spread - select the i'th cpu based on NUMA distances
* @i: index number
* @node: local numa_node
*
- * This function selects an online CPU according to a numa aware policy;
- * local cpus are returned first, followed by non-local ones, then it
- * wraps around.
+ * Return: online CPU according to a numa aware policy; local cpus are returned
+ * first, followed by non-local ones, then it wraps around.
+ *
+ * For those who want to enumerate all CPUs based on their NUMA distances,
+ * i.e. call this function in a loop, like:
+ *
+ * for (i = 0; i < num_online_cpus(); i++) {
+ * cpu = cpumask_local_spread(i, node);
+ * do_something(cpu);
+ * }
+ *
+ * There's a better alternative based on for_each()-like iterators:
*
- * It's not very efficient, but useful for setup.
+ * for_each_numa_hop_mask(mask, node) {
+ * for_each_cpu_andnot(cpu, mask, prev)
+ * do_something(cpu);
+ * prev = mask;
+ * }
+ *
+ * It's simpler and more verbose than the above. The complexity of
+ * iterator-based enumeration is O(sched_domains_numa_levels * nr_cpu_ids),
+ * while cpumask_local_spread(), when called for each cpu, is
+ * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
- int cpu;
+ unsigned int cpu;
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (node == NUMA_NO_NODE) {
- for_each_cpu(cpu, cpu_online_mask)
- if (i-- == 0)
- return cpu;
- } else {
- /* NUMA first. */
- for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
- if (i-- == 0)
- return cpu;
-
- for_each_cpu(cpu, cpu_online_mask) {
- /* Skip NUMA nodes, done above. */
- if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
- continue;
-
- if (i-- == 0)
- return cpu;
- }
- }
- BUG();
+ cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+
+ WARN_ON(cpu >= nr_cpu_ids);
+ return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
+
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
+
+/**
+ * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
+ * @src1p: first &cpumask for intersection
+ * @src2p: second &cpumask for intersection
+ *
+ * Iterated calls using the same srcp1 and srcp2 will be distributed within
+ * their intersection.
+ *
+ * Return: >= nr_cpu_ids if the intersection is empty.
+ */
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ unsigned int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, prev + 1);
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_and_distribute);
+
+/**
+ * cpumask_any_distribute - Return an arbitrary cpu from srcp
+ * @srcp: &cpumask for selection
+ *
+ * Return: >= nr_cpu_ids if the intersection is empty.
+ */
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ unsigned int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+ next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);
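As a sketch of what the distribute helpers buy callers (the mask name and fallback policy are ours): repeated calls with the same masks walk the intersection rather than always handing back its first bit.

    static unsigned int example_pick_cpu(const struct cpumask *allowed)
    {
        unsigned int cpu;

        cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);
        if (cpu >= nr_cpu_ids)          /* empty intersection */
            cpu = raw_smp_processor_id();

        return cpu;
    }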
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
new file mode 100644
index 000000000000..a105e6369efc
--- /dev/null
+++ b/lib/cpumask_kunit.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for cpumask.
+ *
+ * Author: Sander Vanheule <sander@svanheule.net>
+ */
+
+#include <kunit/test.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+
+#define MASK_MSG(m) \
+ "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
+ nr_cpumask_bits, cpumask_bits(m)
+
+#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu(cpu, m) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
+ do { \
+ const cpumask_t *m1 = (mask1); \
+ const cpumask_t *m2 = (mask2); \
+ int weight; \
+ int cpu, iter = 0; \
+ cpumask_##op(&mask_tmp, m1, m2); \
+ weight = cpumask_weight(&mask_tmp); \
+ for_each_cpu_##op(cpu, mask1, mask2) \
+ iter++; \
+ KUNIT_EXPECT_EQ((test), weight, iter); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
+ do { \
+ int mask_weight = num_##name##_cpus(); \
+ int cpu, iter = 0; \
+ for_each_##name##_cpu(cpu) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
+ } while (0)
+
+static cpumask_t mask_empty;
+static cpumask_t mask_all;
+static cpumask_t mask_tmp;
+
+static void test_cpumask_weight(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
+}
+
+static void test_cpumask_first(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_last(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_next(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_iterators(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
+
+ EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
+}
+
+static void test_cpumask_iterators_builtin(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);
+
+ /* Ensure the dynamic masks are stable while running the tests */
+ cpu_hotplug_disable();
+
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
+
+ cpu_hotplug_enable();
+}
+
+static int test_cpumask_init(struct kunit *test)
+{
+ cpumask_clear(&mask_empty);
+ cpumask_setall(&mask_all);
+
+ return 0;
+}
+
+static struct kunit_case test_cpumask_cases[] = {
+ KUNIT_CASE(test_cpumask_weight),
+ KUNIT_CASE(test_cpumask_first),
+ KUNIT_CASE(test_cpumask_last),
+ KUNIT_CASE(test_cpumask_next),
+ KUNIT_CASE(test_cpumask_iterators),
+ KUNIT_CASE(test_cpumask_iterators_builtin),
+ {}
+};
+
+static struct kunit_suite test_cpumask_suite = {
+ .name = "cpumask",
+ .init = test_cpumask_init,
+ .test_cases = test_cpumask_cases,
+};
+kunit_test_suite(test_cpumask_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d1a7d29d2ac9..9cddf35d3b66 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -49,46 +49,6 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
-/*
- * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
- * Reflected bits order, does not augment final value.
- */
-u16 const crc_ccitt_false_table[256] = {
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
- 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
- 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
- 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
- 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
- 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
- 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
- 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
- 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
- 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
- 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
- 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
- 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
- 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
- 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
- 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
- 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
- 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
- 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
- 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
- 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
- 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
- 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
-};
-EXPORT_SYMBOL(crc_ccitt_false_table);
-
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
@@ -104,20 +64,5 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
-/**
- * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
- * for the data buffer
- * @crc: previous CRC value
- * @buffer: data pointer
- * @len: number of bytes in the buffer
- */
-u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
-{
- while (len--)
- crc = crc_ccitt_false_byte(crc, *buffer++);
- return crc;
-}
-EXPORT_SYMBOL(crc_ccitt_false);
-
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
index 1974b355c148..1d26a1647da5 100644
--- a/lib/crc-itu-t.c
+++ b/lib/crc-itu-t.c
@@ -7,7 +7,7 @@
#include <linux/module.h>
#include <linux/crc-itu-t.h>
-/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */
+/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */
const u16 crc_itu_t_table[256] = {
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 8cc01a603416..1ed2ed487097 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -17,64 +17,69 @@
#include <linux/notifier.h>
static struct crypto_shash __rcu *crct10dif_tfm;
-static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
- struct crypto_shash *new, *old;
if (val != CRYPTO_MSG_ALG_LOADED ||
- static_key_false(&crct10dif_fallback) ||
- strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
- return 0;
+ strcmp(alg->cra_name, CRC_T10DIF_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crct10dif_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
mutex_lock(&crc_t10dif_mutex);
old = rcu_dereference_protected(crct10dif_tfm,
lockdep_is_held(&crc_t10dif_mutex));
- if (!old) {
- mutex_unlock(&crc_t10dif_mutex);
- return 0;
- }
- new = crypto_alloc_shash("crct10dif", 0, 0);
+ new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc_t10dif_mutex);
- return 0;
+ return;
}
rcu_assign_pointer(crct10dif_tfm, new);
mutex_unlock(&crc_t10dif_mutex);
- synchronize_rcu();
- crypto_free_shash(old);
- return 0;
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crct10dif_fallback);
+ }
}
static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_rehash,
+ .notifier_call = crc_t10dif_notify,
};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
- char ctx[2];
+ __u16 crc;
} desc;
int err;
- if (static_key_false(&crct10dif_fallback))
+ if (static_branch_unlikely(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- *(__u16 *)desc.ctx = crc;
-
+ desc.crc = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
rcu_read_unlock();
BUG_ON(err);
- return *(__u16 *)desc.ctx;
+ return desc.crc;
}
EXPORT_SYMBOL(crc_t10dif_update);
@@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
crypto_register_notifier(&crc_t10dif_nb);
- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
- if (IS_ERR(crct10dif_tfm)) {
- static_key_slow_inc(&crct10dif_fallback);
- crct10dif_tfm = NULL;
- }
+ crc_t10dif_rehash(&crct10dif_rehash_work);
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_unregister_notifier(&crc_t10dif_nb);
- crypto_free_shash(crct10dif_tfm);
+ cancel_work_sync(&crct10dif_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
}
module_init(crc_t10dif_mod_init);
@@ -106,15 +109,23 @@ module_exit(crc_t10dif_mod_fini);
static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
{
- if (static_key_false(&crct10dif_fallback))
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crct10dif_fallback))
return sprintf(buffer, "fallback\n");
- return sprintf(buffer, "%s\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+ rcu_read_lock();
+ tfm = rcu_dereference(crct10dif_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
}
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
-MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crc32.c b/lib/crc32.c
index 4a20455d1f61..5649847d0a8d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -24,7 +24,7 @@
* Version 2. See the file COPYING for more details.
*/
-/* see: Documentation/crc32.txt for a description of algorithms */
+/* see: Documentation/staging/crc32.rst for a description of algorithms */
#include <linux/crc32.h>
#include <linux/crc32poly.h>
@@ -194,13 +194,11 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
}
u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -208,6 +206,7 @@ EXPORT_SYMBOL(__crc32c_le);
u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
/*
* This multiplies the polynomials x and y modulo the given modulus.
@@ -331,16 +330,15 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
return crc;
}
-#if CRC_LE_BITS == 1
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+#if CRC_BE_BITS == 1
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
+ return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32test.c b/lib/crc32test.c
index 97d6a57cefcc..9b4af79412c4 100644
--- a/lib/crc32test.c
+++ b/lib/crc32test.c
@@ -675,7 +675,7 @@ static int __init crc32c_test(void)
/* pre-warm the cache */
for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
+ bytes += test[i].length;
crc ^= __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length);
@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);
diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c
new file mode 100644
index 000000000000..fc9ae0da5df7
--- /dev/null
+++ b/lib/crc64-rocksoft.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc64.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <linux/static_key.h>
+#include <linux/notifier.h>
+
+static struct crypto_shash __rcu *crc64_rocksoft_tfm;
+static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
+static DEFINE_MUTEX(crc64_rocksoft_mutex);
+static struct work_struct crc64_rocksoft_rehash_work;
+
+static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crc64_rocksoft_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc64_rocksoft_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
+
+ mutex_lock(&crc64_rocksoft_mutex);
+ old = rcu_dereference_protected(crc64_rocksoft_tfm,
+ lockdep_is_held(&crc64_rocksoft_mutex));
+ new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc64_rocksoft_mutex);
+ return;
+ }
+ rcu_assign_pointer(crc64_rocksoft_tfm, new);
+ mutex_unlock(&crc64_rocksoft_mutex);
+
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crc64_rocksoft_fallback);
+ }
+}
+
+static struct notifier_block crc64_rocksoft_nb = {
+ .notifier_call = crc64_rocksoft_notify,
+};
+
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ u64 crc;
+ } desc;
+ int err;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return crc64_rocksoft_generic(crc, buffer, len);
+
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
+ desc.crc = crc;
+ err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
+ BUG_ON(err);
+
+ return desc.crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
+{
+ return crc64_rocksoft_update(0, buffer, len);
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft);
+
+static int __init crc64_rocksoft_mod_init(void)
+{
+ INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
+ crypto_register_notifier(&crc64_rocksoft_nb);
+ crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
+ return 0;
+}
+
+static void __exit crc64_rocksoft_mod_fini(void)
+{
+ crypto_unregister_notifier(&crc64_rocksoft_nb);
+ cancel_work_sync(&crc64_rocksoft_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
+}
+
+module_init(crc64_rocksoft_mod_init);
+module_exit(crc64_rocksoft_mod_fini);
+
+static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ rcu_read_lock();
+ tfm = rcu_dereference(crc64_rocksoft_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
+}
+
+module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
+
+MODULE_AUTHOR("Keith Busch <kbusch@kernel.org>");
+MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc64");
diff --git a/lib/crc64.c b/lib/crc64.c
index 0ef8ae6ac047..61ae8dfb6a1c 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -4,7 +4,7 @@
*
* This is a basic crc64 implementation following ECMA-182 specification,
* which can be found from,
- * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ * https://www.ecma-international.org/publications/standards/Ecma-182.htm
*
* Dr. Ross N. Williams has a great document to introduce the idea of CRC
* algorithm, here the CRC64 code is also inspired by the table-driven
@@ -22,12 +22,20 @@
* x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
* x^7 + x^4 + x + 1
*
+ * crc64rocksoft[256] table is from the Rocksoft specification polynomial
+ * defined as,
+ *
+ * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
+ * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
+ * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1
+ *
* Copyright 2018 SUSE Linux.
* Author: Coly Li <colyli@suse.de>
*/
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/crc64.h>
#include "crc64table.h"
MODULE_DESCRIPTION("CRC64 calculations");
@@ -36,7 +44,7 @@ MODULE_LICENSE("GPL v2");
/**
* crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
* @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- or the previous crc64 value if computing incrementally.
+ * or the previous crc64 value if computing incrementally.
* @p: pointer to buffer over which CRC64 is run
* @len: length of buffer @p
*/
@@ -54,3 +62,24 @@ u64 __pure crc64_be(u64 crc, const void *p, size_t len)
return crc;
}
EXPORT_SYMBOL_GPL(crc64_be);
+
+/**
+ * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
+{
+ const unsigned char *_p = p;
+ size_t i;
+
+ crc = ~crc;
+
+ for (i = 0; i < len; i++)
+ crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
+
+ return ~crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
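Per the kernel-doc above, a fresh calculation seeds with 0 and incremental callers feed the previous value straight back in; a short sketch with our own buffer arguments:

    static u64 example_crc64_rocksoft(const u8 *p, size_t half, size_t len)
    {
        u64 crc = crc64_rocksoft_generic(0, p, half);             /* new calculation */

        return crc64_rocksoft_generic(crc, p + half, len - half); /* continue */
    }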
diff --git a/lib/crc7.c b/lib/crc7.c
index 6a848d73e804..3848e313b722 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -51,7 +51,7 @@ const u8 crc7_be_syndrome_table[256] = {
EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
- * crc7 - update the CRC7 for the data buffer
+ * crc7_be - update the CRC7 for the data buffer
* @crc: previous CRC7 value
* @buffer: data pointer
* @len: number of bytes in the buffer
diff --git a/lib/crc8.c b/lib/crc8.c
index 595a5a75e3cd..1ad8e501d9b6 100644
--- a/lib/crc8.c
+++ b/lib/crc8.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(crc8_populate_lsb);
* @nbytes: number of bytes in data buffer.
* @crc: previous returned crc8 value.
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc)
{
/* loop over the buffer data */
while (nbytes-- > 0)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
new file mode 100644
index 000000000000..45436bfc6dff
--- /dev/null
+++ b/lib/crypto/Kconfig
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Crypto library routines"
+
+config CRYPTO_LIB_UTILS
+ tristate
+
+config CRYPTO_LIB_AES
+ tristate
+
+config CRYPTO_LIB_AESGCM
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_GF128MUL
+ select CRYPTO_LIB_UTILS
+
+config CRYPTO_LIB_ARC4
+ tristate
+
+config CRYPTO_LIB_GF128MUL
+ tristate
+
+config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ bool
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Blake2s library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_BLAKE2S_GENERIC
+ def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ This symbol can be depended upon by arch implementations of the
+ Blake2s library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_BLAKE2S.
+
+config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the ChaCha library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CHACHA_GENERIC
+ tristate
+ select CRYPTO_LIB_UTILS
+ help
+ This symbol can be depended upon by arch implementations of the
+ ChaCha library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CHACHA.
+
+config CRYPTO_LIB_CHACHA
+ tristate "ChaCha library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ help
+ Enable the ChaCha library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Curve25519 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CURVE25519_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Curve25519 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CURVE25519.
+
+config CRYPTO_LIB_CURVE25519
+ tristate "Curve25519 scalar multiplication library"
+ depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+ select CRYPTO_LIB_UTILS
+ help
+ Enable the Curve25519 library interface. This interface may be
+ fulfilled by either the generic implementation or an arch-specific
+ one, if one is available and enabled.
+
+config CRYPTO_LIB_DES
+ tristate
+
+config CRYPTO_LIB_POLY1305_RSIZE
+ int
+ default 2 if MIPS
+ default 11 if X86_64
+ default 9 if ARM || ARM64
+ default 1
+
+config CRYPTO_ARCH_HAVE_LIB_POLY1305
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Poly1305 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_POLY1305_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Poly1305 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_POLY1305.
+
+config CRYPTO_LIB_POLY1305
+ tristate "Poly1305 library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ help
+ Enable the Poly1305 library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_LIB_CHACHA20POLY1305
+ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ depends on CRYPTO
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
+ select CRYPTO_ALGAPI
+
+config CRYPTO_LIB_SHA1
+ tristate
+
+config CRYPTO_LIB_SHA256
+ tristate
+
+endmenu
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000000..8d1446c2be71
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
+libcryptoutils-y := memneq.o utils.o
+
+# chacha is used by the /dev/random driver which is always builtin
+obj-y += chacha.o
+obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
+
+obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
+libaes-y := aes.o
+
+obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
+libaesgcm-y := aesgcm.o
+
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
+
+obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
+
+# blake2s is used by the /dev/random driver which is always builtin
+obj-y += libblake2s.o
+libblake2s-y := blake2s.o
+libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
+
+obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
+libchacha20poly1305-y += chacha20poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o
+libcurve25519-generic-y := curve25519-fiat32.o
+libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+libcurve25519-generic-y += curve25519-generic.o
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
+libcurve25519-y += curve25519.o
+
+obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
+libdes-y := des.o
+
+obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o
+libpoly1305-y := poly1305-donna32.o
+libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
+libpoly1305-y += poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o
+libsha1-y := sha1.o
+
+obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
+libsha256-y := sha256.o
+
+ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
+libblake2s-y += blake2s-selftest.o
+libchacha20poly1305-y += chacha20poly1305-selftest.o
+libcurve25519-y += curve25519-selftest.o
+endif
+
+obj-$(CONFIG_MPILIB) += mpi/
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
new file mode 100644
index 000000000000..827fe89922ff
--- /dev/null
+++ b/lib/crypto/aes.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <crypto/aes.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+/*
+ * Emit the sbox as volatile const to prevent the compiler from doing
+ * constant folding on sbox references involving fixed indexes.
+ */
+static volatile const u8 __cacheline_aligned aes_sbox[] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
+};
+
+static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
+ 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+ 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+ 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
+ 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
+ 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+ 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+ 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
+ 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
+ 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+ 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+ 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
+ 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
+ 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+ 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+ 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
+ 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
+ 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+ 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+ 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
+ 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
+ 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
+};
+
+extern const u8 crypto_aes_sbox[256] __alias(aes_sbox);
+extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);
+
+EXPORT_SYMBOL(crypto_aes_sbox);
+EXPORT_SYMBOL(crypto_aes_inv_sbox);
+
+static u32 mul_by_x(u32 w)
+{
+ u32 x = w & 0x7f7f7f7f;
+ u32 y = w & 0x80808080;
+
+ /* multiply by polynomial 'x' (0b10) in GF(2^8) */
+ return (x << 1) ^ (y >> 7) * 0x1b;
+}
+
+static u32 mul_by_x2(u32 w)
+{
+ u32 x = w & 0x3f3f3f3f;
+ u32 y = w & 0x80808080;
+ u32 z = w & 0x40404040;
+
+ /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
+ return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
+}
+
+static u32 mix_columns(u32 x)
+{
+ /*
+ * Perform the following matrix multiplication in GF(2^8)
+ *
+ * | 0x2 0x3 0x1 0x1 | | x[0] |
+ * | 0x1 0x2 0x3 0x1 | | x[1] |
+ * | 0x1 0x1 0x2 0x3 | x | x[2] |
+ * | 0x3 0x1 0x1 0x2 | | x[3] |
+ */
+ u32 y = mul_by_x(x) ^ ror32(x, 16);
+
+ return y ^ ror32(x ^ y, 8);
+}
+
+static u32 inv_mix_columns(u32 x)
+{
+ /*
+ * Perform the following matrix multiplication in GF(2^8)
+ *
+ * | 0xe 0xb 0xd 0x9 | | x[0] |
+ * | 0x9 0xe 0xb 0xd | | x[1] |
+ * | 0xd 0x9 0xe 0xb | x | x[2] |
+ * | 0xb 0xd 0x9 0xe | | x[3] |
+ *
+ * which can conveniently be reduced to
+ *
+ * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] |
+ * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] |
+ * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
+ * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] |
+ */
+ u32 y = mul_by_x2(x);
+
+ return mix_columns(x ^ y ^ ror32(y, 16));
+}
+
+static __always_inline u32 subshift(u32 in[], int pos)
+{
+ return (aes_sbox[in[pos] & 0xff]) ^
+ (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
+ (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
+ (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
+}
+
+static __always_inline u32 inv_subshift(u32 in[], int pos)
+{
+ return (aes_inv_sbox[in[pos] & 0xff]) ^
+ (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
+ (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
+ (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
+}
+
+static u32 subw(u32 in)
+{
+ return (aes_sbox[in & 0xff]) ^
+ (aes_sbox[(in >> 8) & 0xff] << 8) ^
+ (aes_sbox[(in >> 16) & 0xff] << 16) ^
+ (aes_sbox[(in >> 24) & 0xff] << 24);
+}
+
+/**
+ * aes_expandkey - Expands the AES key as described in FIPS-197
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ * The expanded key size is 240 bytes (a maximum of 14 rounds, each with a
+ * unique 16-byte round key, plus a 16-byte key used before the first round).
+ * The decryption key is prepared for the "Equivalent Inverse Cipher" as
+ * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
+ * for the initial combination, the second slot for the first round and so on.
+ */
+int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ u32 kwords = key_len / sizeof(u32);
+ u32 rc, i, j;
+ int err;
+
+ err = aes_check_keylen(key_len);
+ if (err)
+ return err;
+
+ ctx->key_length = key_len;
+
+ for (i = 0; i < kwords; i++)
+ ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
+
+ for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
+ u32 *rki = ctx->key_enc + (i * kwords);
+ u32 *rko = rki + kwords;
+
+ rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
+ rko[1] = rko[0] ^ rki[1];
+ rko[2] = rko[1] ^ rki[2];
+ rko[3] = rko[2] ^ rki[3];
+
+ if (key_len == AES_KEYSIZE_192) {
+ if (i >= 7)
+ break;
+ rko[4] = rko[3] ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ } else if (key_len == AES_KEYSIZE_256) {
+ if (i >= 6)
+ break;
+ rko[4] = subw(rko[3]) ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ rko[6] = rko[5] ^ rki[6];
+ rko[7] = rko[6] ^ rki[7];
+ }
+ }
+
+ /*
+ * Generate the decryption keys for the Equivalent Inverse Cipher.
+ * This involves reversing the order of the round keys, and applying
+ * the Inverse Mix Columns transformation to all but the first and
+ * the last one.
+ */
+ ctx->key_dec[0] = ctx->key_enc[key_len + 24];
+ ctx->key_dec[1] = ctx->key_enc[key_len + 25];
+ ctx->key_dec[2] = ctx->key_enc[key_len + 26];
+ ctx->key_dec[3] = ctx->key_enc[key_len + 27];
+
+ for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
+ ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
+ ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
+ ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
+ ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
+ }
+
+ ctx->key_dec[i] = ctx->key_enc[0];
+ ctx->key_dec[i + 1] = ctx->key_enc[1];
+ ctx->key_dec[i + 2] = ctx->key_enc[2];
+ ctx->key_dec[i + 3] = ctx->key_enc[3];
+
+ return 0;
+}
+EXPORT_SYMBOL(aes_expandkey);
+
+/**
+ * aes_encrypt - Encrypt a single AES block
+ * @ctx: Context struct containing the key schedule
+ * @out: Buffer to store the ciphertext
+ * @in: Buffer containing the plaintext
+ */
+void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
+{
+ const u32 *rkp = ctx->key_enc + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
+ int round;
+
+ st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
+ st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
+ st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
+
+ /*
+ * Force the compiler to emit data independent Sbox references,
+ * by xoring the input with Sbox values that are known to add up
+ * to zero. This pulls the entire Sbox into the D-cache before any
+ * data dependent lookups are done.
+ */
+ st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
+ st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
+ st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
+ st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
+
+ for (round = 0;; round += 2, rkp += 8) {
+ st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
+ st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
+ st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
+ st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
+
+ if (round == rounds - 2)
+ break;
+
+ st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
+ st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
+ st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
+ st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
+ }
+
+ put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
+ put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+}
+EXPORT_SYMBOL(aes_encrypt);
+
+/**
+ * aes_decrypt - Decrypt a single AES block
+ * @ctx: Context struct containing the key schedule
+ * @out: Buffer to store the plaintext
+ * @in: Buffer containing the ciphertext
+ */
+void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
+{
+ const u32 *rkp = ctx->key_dec + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
+ int round;
+
+ st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
+ st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
+ st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
+
+ /*
+ * Force the compiler to emit data independent Sbox references,
+ * by xoring the input with Sbox values that are known to add up
+ * to zero. This pulls the entire Sbox into the D-cache before any
+ * data dependent lookups are done.
+ */
+ st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
+ st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
+ st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
+ st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
+
+ for (round = 0;; round += 2, rkp += 8) {
+ st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
+ st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
+ st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
+ st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
+
+ if (round == rounds - 2)
+ break;
+
+ st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
+ st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
+ st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
+ st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
+ }
+
+ put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
+ put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+}
+EXPORT_SYMBOL(aes_decrypt);
+
+MODULE_DESCRIPTION("Generic AES library");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
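A minimal sketch of the library interface defined above (key, block and helper names are placeholders): expand the key once, then encrypt and decrypt single 16-byte blocks with the same context.

    static int example_aes_one_block(const u8 *key, unsigned int key_len,
                                     const u8 in[AES_BLOCK_SIZE])
    {
        struct crypto_aes_ctx ctx;
        u8 ct[AES_BLOCK_SIZE], pt[AES_BLOCK_SIZE];
        int err;

        err = aes_expandkey(&ctx, key, key_len);    /* 16, 24 or 32 byte keys */
        if (err)
            return err;

        aes_encrypt(&ctx, ct, in);      /* one block in, one block out */
        aes_decrypt(&ctx, pt, ct);      /* pt ends up equal to in */

        return 0;
    }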
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
new file mode 100644
index 000000000000..6bba6473fdf3
--- /dev/null
+++ b/lib/crypto/aesgcm.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of GCM
+ *
+ * Copyright 2022 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/algapi.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
+
+#include <asm/irqflags.h>
+
+static void aesgcm_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-GCM, both the GHASH key derivation and the CTR mode
+ * encryption operate on known plaintext, making them susceptible to
+ * timing attacks on the encryption key. The AES library already
+ * mitigates this risk to some extent by pulling the entire S-box into
+ * the caches before doing any substitutions, but this strategy is more
+ * effective when running with interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aesgcm_expandkey - Expands the AES and GHASH keys for the AES-GCM key
+ * schedule
+ *
+ * @ctx: The data structure that will hold the AES-GCM key schedule
+ * @key: The AES encryption input key
+ * @keysize: The length in bytes of the input key
+ * @authsize: The size in bytes of the GCM authentication tag
+ *
+ * Returns: 0 on success, or -EINVAL if @keysize or @authsize contain values
+ * that are not permitted by the GCM specification.
+ */
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize)
+{
+ u8 kin[AES_BLOCK_SIZE] = {};
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize) ?:
+ aes_expandkey(&ctx->aes_ctx, key, keysize);
+ if (ret)
+ return ret;
+
+ ctx->authsize = authsize;
+ aesgcm_encrypt_block(&ctx->aes_ctx, &ctx->ghash_key, kin);
+
+ return 0;
+}
+EXPORT_SYMBOL(aesgcm_expandkey);
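+
+/*
+ * Illustrative use only (a sketch, not called from this file): a user of the
+ * library typically expands the key once and reuses the schedule, e.g.
+ *
+ *	struct aesgcm_ctx ctx;
+ *
+ *	if (aesgcm_expandkey(&ctx, key, AES_KEYSIZE_256, AES_BLOCK_SIZE))
+ *		return -EINVAL;
+ *
+ * where 'key' is a hypothetical caller-provided 32-byte buffer and a full
+ * 16-byte authentication tag is requested.
+ */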
+
+static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
+ int len)
+{
+ while (len > 0) {
+ crypto_xor((u8 *)ghash, src, min(len, GHASH_BLOCK_SIZE));
+ gf128mul_lle(ghash, key);
+
+ src += GHASH_BLOCK_SIZE;
+ len -= GHASH_BLOCK_SIZE;
+ }
+}
+
+/**
+ * aesgcm_mac - Generates the authentication tag using the AES-GCM algorithm.
+ * @ctx: The expanded AES-GCM key schedule
+ * @src: The input source data.
+ * @src_len: Length of the source data.
+ * @assoc: Points to the associated data.
+ * @assoc_len: Length of the associated data.
+ * @ctr: Points to the counter value.
+ * @authtag: The output buffer for the authentication tag.
+ *
+ * Computes the GHASH over the associated data, the source data and their
+ * encoded lengths, masks it with the encrypted initial counter block, and
+ * writes the (truncated) result to @authtag.
+ */
+static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
+ const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
+{
+ be128 tail = { cpu_to_be64(assoc_len * 8), cpu_to_be64(src_len * 8) };
+ u8 buf[AES_BLOCK_SIZE];
+ be128 ghash = {};
+
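+	/*
+	 * Standard GCM tag construction (NIST SP 800-38D): the tag is
+	 *   MSB_t(GHASH_H(A || C || [len(A)]64 || [len(C)]64) ^ E_K(J0))
+	 * where A and C are zero-padded to full blocks and, for the 96-bit
+	 * IVs used here, J0 is IV || 0^31 || 1, i.e. the counter block with
+	 * ctr[3] == 1 below.
+	 */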
+ aesgcm_ghash(&ghash, &ctx->ghash_key, assoc, assoc_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, src, src_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, &tail, sizeof(tail));
+
+ ctr[3] = cpu_to_be32(1);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(authtag, buf, (u8 *)&ghash, ctx->authsize);
+
+ memzero_explicit(&ghash, sizeof(ghash));
+ memzero_explicit(buf, sizeof(buf));
+}
+
+static void aesgcm_crypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int len, __be32 *ctr)
+{
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int n = 2;
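+	/*
+	 * Counter value 1 is reserved for masking the authentication tag (see
+	 * aesgcm_mac()), so the keystream for the payload starts at counter 2.
+	 */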
+
+ while (len > 0) {
+ /*
+ * The counter increment below must not result in overflow or
+ * carry into the next 32-bit word, as this could result in
+ * inadvertent IV reuse, which must be avoided at all cost for
+ * stream ciphers such as AES-CTR. Given the range of 'int
+ * len', this cannot happen, so no explicit test is necessary.
+ */
+ ctr[3] = cpu_to_be32(n++);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+ memzero_explicit(buf, sizeof(buf));
+}
+
+/**
+ * aesgcm_encrypt - Perform AES-GCM encryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext.
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag should be stored. The buffer is assumed to have space for
+ * @ctx->authsize bytes.
+ */
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag)
+{
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ aesgcm_mac(ctx, dst, crypt_len, assoc, assoc_len, ctr, authtag);
+}
+EXPORT_SYMBOL(aesgcm_encrypt);
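+
+/*
+ * Illustrative use only: sealing a buffer in place with a previously expanded
+ * context. 'msg', 'aad', 'iv' and 'tag' are hypothetical caller-provided
+ * buffers; 'iv' must be 12 bytes and 'tag' must hold ctx.authsize bytes.
+ *
+ *	aesgcm_encrypt(&ctx, msg, msg, msg_len, aad, aad_len, iv, tag);
+ */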
+
+/**
+ * aesgcm_decrypt - Perform AES-GCM decryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext.
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag is stored.
+ *
+ * Returns: true on success, or false if the ciphertext failed authentication.
+ * On failure, no plaintext will be returned.
+ */
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag)
+{
+ u8 tagbuf[AES_BLOCK_SIZE];
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_mac(ctx, src, crypt_len, assoc, assoc_len, ctr, tagbuf);
+ if (crypto_memneq(authtag, tagbuf, ctx->authsize)) {
+ memzero_explicit(tagbuf, sizeof(tagbuf));
+ return false;
+ }
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ return true;
+}
+EXPORT_SYMBOL(aesgcm_decrypt);
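+
+/*
+ * Illustrative use only: authenticated decryption in place. The tag is
+ * verified before any plaintext is produced, so the __must_check return
+ * value has to be tested ('msg', 'aad', 'iv' and 'tag' as above):
+ *
+ *	if (!aesgcm_decrypt(&ctx, msg, msg, msg_len, aad, aad_len, iv, tag))
+ *		return -EBADMSG;
+ */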
+
+MODULE_DESCRIPTION("Generic AES-GCM library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static const u8 __initconst ctext0[16] =
+ "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
+ "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a";
+
+static const u8 __initconst ptext1[16];
+
+static const u8 __initconst ctext1[32] =
+ "\x03\x88\xda\xce\x60\xb6\xa3\x92"
+ "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
+ "\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
+ "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf";
+
+static const u8 __initconst ptext2[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext2[80] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
+ "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
+ "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4";
+
+static const u8 __initconst ptext3[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext3[76] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91"
+ "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
+ "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47";
+
+static const u8 __initconst ctext4[16] =
+ "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
+ "\xa0\x0e\xd1\xf3\x12\x57\x24\x35";
+
+static const u8 __initconst ctext5[32] =
+ "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
+ "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
+ "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
+ "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb";
+
+static const u8 __initconst ptext6[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext6[80] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10\xac\xad\xe2\x56"
+ "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
+ "\xb1\x18\x02\x4d\xb8\x67\x4a\x14";
+
+static const u8 __initconst ctext7[16] =
+ "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
+ "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b";
+
+static const u8 __initconst ctext8[32] =
+ "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
+ "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
+ "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
+ "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19";
+
+static const u8 __initconst ptext9[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext9[80] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
+ "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
+ "\xec\x1a\x50\x22\x70\xe3\xcc\x6c";
+
+static const u8 __initconst ptext10[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext10[76] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62"
+ "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
+ "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b";
+
+static const u8 __initconst ptext11[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext11[76] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10"
+ "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
+ "\x37\xba\x55\xbd\x6d\x27\x61\x8c";
+
+static const u8 __initconst ptext12[719] =
+ "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5";
+
+static const u8 __initconst ctext12[735] =
+ "\x84\x0b\xdb\xd5\xb7\xa8\xfe\x20"
+ "\xbb\xb1\x12\x7f\x41\xea\xb3\xc0"
+ "\xa2\xb4\x37\x19\x11\x58\xb6\x0b"
+ "\x4c\x1d\x38\x05\x54\xd1\x16\x73"
+ "\x8e\x1c\x20\x90\xa2\x9a\xb7\x74"
+ "\x47\xe6\xd8\xfc\x18\x3a\xb4\xea"
+ "\xd5\x16\x5a\x2c\x53\x01\x46\xb3"
+ "\x18\x33\x74\x6c\x50\xf2\xe8\xc0"
+ "\x73\xda\x60\x22\xeb\xe3\xe5\x9b"
+ "\x20\x93\x6c\x4b\x37\x99\xb8\x23"
+ "\x3b\x4e\xac\xe8\x5b\xe8\x0f\xb7"
+ "\xc3\x8f\xfb\x4a\x37\xd9\x39\x95"
+ "\x34\xf1\xdb\x8f\x71\xd9\xc7\x0b"
+ "\x02\xf1\x63\xfc\x9b\xfc\xc5\xab"
+ "\xb9\x14\x13\x21\xdf\xce\xaa\x88"
+ "\x44\x30\x1e\xce\x26\x01\x92\xf8"
+ "\x9f\x00\x4b\x0c\x4b\xf7\x5f\xe0"
+ "\x89\xca\x94\x66\x11\x21\x97\xca"
+ "\x3e\x83\x74\x2d\xdb\x4d\x11\xeb"
+ "\x97\xc2\x14\xff\x9e\x1e\xa0\x6b"
+ "\x08\xb4\x31\x2b\x85\xc6\x85\x6c"
+ "\x90\xec\x39\xc0\xec\xb3\xb5\x4e"
+ "\xf3\x9c\xe7\x83\x3a\x77\x0a\xf4"
+ "\x56\xfe\xce\x18\x33\x6d\x0b\x2d"
+ "\x33\xda\xc8\x05\x5c\xb4\x09\x2a"
+ "\xde\x6b\x52\x98\x01\xef\x36\x3d"
+ "\xbd\xf9\x8f\xa8\x3e\xaa\xcd\xd1"
+ "\x01\x2d\x42\x49\xc3\xb6\x84\xbb"
+ "\x48\x96\xe0\x90\x93\x6c\x48\x64"
+ "\xd4\xfa\x7f\x93\x2c\xa6\x21\xc8"
+ "\x7a\x23\x7b\xaa\x20\x56\x12\xae"
+ "\x16\x9d\x94\x0f\x54\xa1\xec\xca"
+ "\x51\x4e\xf2\x39\xf4\xf8\x5f\x04"
+ "\x5a\x0d\xbf\xf5\x83\xa1\x15\xe1"
+ "\xf5\x3c\xd8\x62\xa3\xed\x47\x89"
+ "\x85\x4c\xe5\xdb\xac\x9e\x17\x1d"
+ "\x0c\x09\xe3\x3e\x39\x5b\x4d\x74"
+ "\x0e\xf5\x34\xee\x70\x11\x4c\xfd"
+ "\xdb\x34\xb1\xb5\x10\x3f\x73\xb7"
+ "\xf5\xfa\xed\xb0\x1f\xa5\xcd\x3c"
+ "\x8d\x35\x83\xd4\x11\x44\x6e\x6c"
+ "\x5b\xe0\x0e\x69\xa5\x39\xe5\xbb"
+ "\xa9\x57\x24\x37\xe6\x1f\xdd\xcf"
+ "\x16\x2a\x13\xf9\x6a\x2d\x90\xa0"
+ "\x03\x60\x7a\xed\x69\xd5\x00\x8b"
+ "\x7e\x4f\xcb\xb9\xfa\x91\xb9\x37"
+ "\xc1\x26\xce\x90\x97\x22\x64\x64"
+ "\xc1\x72\x43\x1b\xf6\xac\xc1\x54"
+ "\x8a\x10\x9c\xdd\x8d\xd5\x8e\xb2"
+ "\xe4\x85\xda\xe0\x20\x5f\xf4\xb4"
+ "\x15\xb5\xa0\x8d\x12\x74\x49\x23"
+ "\x3a\xdf\x4a\xd3\xf0\x3b\x89\xeb"
+ "\xf8\xcc\x62\x7b\xfb\x93\x07\x41"
+ "\x61\x26\x94\x58\x70\xa6\x3c\xe4"
+ "\xff\x58\xc4\x13\x3d\xcb\x36\x6b"
+ "\x32\xe5\xb2\x6d\x03\x74\x6f\x76"
+ "\x93\x77\xde\x48\xc4\xfa\x30\x4a"
+ "\xda\x49\x80\x77\x0f\x1c\xbe\x11"
+ "\xc8\x48\xb1\xe5\xbb\xf2\x8a\xe1"
+ "\x96\x2f\x9f\xd1\x8e\x8a\x5c\xe2"
+ "\xf7\xd7\xd8\x54\xf3\x3f\xc4\x91"
+ "\xb8\xfb\x86\xdc\x46\x24\x91\x60"
+ "\x6c\x2f\xc9\x41\x37\x51\x49\x54"
+ "\x09\x81\x21\xf3\x03\x9f\x2b\xe3"
+ "\x1f\x39\x63\xaf\xf4\xd7\x53\x60"
+ "\xa7\xc7\x54\xf9\xee\xb1\xb1\x7d"
+ "\x75\x54\x65\x93\xfe\xb1\x68\x6b"
+ "\x57\x02\xf9\xbb\x0e\xf9\xf8\xbf"
+ "\x01\x12\x27\xb4\xfe\xe4\x79\x7a"
+ "\x40\x5b\x51\x4b\xdf\x38\xec\xb1"
+ "\x6a\x56\xff\x35\x4d\x42\x33\xaa"
+ "\x6f\x1b\xe4\xdc\xe0\xdb\x85\x35"
+ "\x62\x10\xd4\xec\xeb\xc5\x7e\x45"
+ "\x1c\x6f\x17\xca\x3b\x8e\x2d\x66"
+ "\x4f\x4b\x36\x56\xcd\x1b\x59\xaa"
+ "\xd2\x9b\x17\xb9\x58\xdf\x7b\x64"
+ "\x8a\xff\x3b\x9c\xa6\xb5\x48\x9e"
+ "\xaa\xe2\x5d\x09\x71\x32\x5f\xb6"
+ "\x29\xbe\xe7\xc7\x52\x7e\x91\x82"
+ "\x6b\x6d\x33\xe1\x34\x06\x36\x21"
+ "\x5e\xbe\x1e\x2f\x3e\xc1\xfb\xea"
+ "\x49\x2c\xb5\xca\xf7\xb0\x37\xea"
+ "\x1f\xed\x10\x04\xd9\x48\x0d\x1a"
+ "\x1c\xfb\xe7\x84\x0e\x83\x53\x74"
+ "\xc7\x65\xe2\x5c\xe5\xba\x73\x4c"
+ "\x0e\xe1\xb5\x11\x45\x61\x43\x46"
+ "\xaa\x25\x8f\xbd\x85\x08\xfa\x4c"
+ "\x15\xc1\xc0\xd8\xf5\xdc\x16\xbb"
+ "\x7b\x1d\xe3\x87\x57\xa7\x2a\x1d"
+ "\x38\x58\x9e\x8a\x43\xdc\x57"
+ "\xd1\x81\x7d\x2b\xe9\xff\x99\x3a"
+ "\x4b\x24\x52\x58\x55\xe1\x49\x14";
+
+static struct {
+ const u8 *ptext;
+ const u8 *ctext;
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[GCM_AES_IV_SIZE];
+ u8 assoc[20];
+
+ int klen;
+ int clen;
+ int plen;
+ int alen;
+} const aesgcm_tv[] __initconst = {
+ { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .klen = 16,
+ .ctext = ctext0,
+ .clen = sizeof(ctext0),
+ }, {
+ .klen = 16,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext1,
+ .clen = sizeof(ctext1),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext2,
+ .plen = sizeof(ptext2),
+ .ctext = ctext2,
+ .clen = sizeof(ctext2),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext3,
+ .plen = sizeof(ptext3),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext3,
+ .clen = sizeof(ctext3),
+ }, {
+ .klen = 24,
+ .ctext = ctext4,
+ .clen = sizeof(ctext4),
+ }, {
+ .klen = 24,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext5,
+ .clen = sizeof(ctext5),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext6,
+ .plen = sizeof(ptext6),
+ .ctext = ctext6,
+ .clen = sizeof(ctext6),
+ }, {
+ .klen = 32,
+ .ctext = ctext7,
+ .clen = sizeof(ctext7),
+ }, {
+ .klen = 32,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext8,
+ .clen = sizeof(ctext8),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext9,
+ .plen = sizeof(ptext9),
+ .ctext = ctext9,
+ .clen = sizeof(ctext9),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext10,
+ .plen = sizeof(ptext10),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext10,
+ .clen = sizeof(ctext10),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext11,
+ .plen = sizeof(ptext11),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext11,
+ .clen = sizeof(ctext11),
+ }, {
+ .key = "\x62\x35\xf8\x95\xfc\xa5\xeb\xf6"
+ "\x0e\x92\x12\x04\xd3\xa1\x3f\x2e"
+ "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 32,
+ .iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff",
+ .ptext = ptext12,
+ .plen = sizeof(ptext12),
+ .ctext = ctext12,
+ .clen = sizeof(ctext12),
+ }
+};
+
+static int __init libaesgcm_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aesgcm_tv); i++) {
+ u8 tagbuf[AES_BLOCK_SIZE];
+ int plen = aesgcm_tv[i].plen;
+ struct aesgcm_ctx ctx;
+ u8 buf[sizeof(ptext12)];
+
+ if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
+ aesgcm_tv[i].clen - plen)) {
+ pr_err("aesgcm_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ if (!aesgcm_decrypt(&ctx, buf, aesgcm_tv[i].ctext, plen,
+ aesgcm_tv[i].assoc, aesgcm_tv[i].alen,
+ aesgcm_tv[i].iv, aesgcm_tv[i].ctext + plen)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aesgcm_encrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf);
+ if (memcmp(buf, aesgcm_tv[i].ctext, plen)) {
+ pr_err("aesgcm_encrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ if (!aesgcm_decrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #2 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+module_init(libaesgcm_init);
+
+static void __exit libaesgcm_exit(void)
+{
+}
+module_exit(libaesgcm_exit);
+#endif
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000000..c2020f19c652
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <crypto/arc4.h>
+#include <linux/module.h>
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+ int i, j = 0, k = 0;
+
+ ctx->x = 1;
+ ctx->y = 0;
+
+ for (i = 0; i < 256; i++)
+ ctx->S[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ u32 a = ctx->S[i];
+
+ j = (j + in_key[k] + a) & 0xff;
+ ctx->S[i] = ctx->S[j];
+ ctx->S[j] = a;
+ if (++k >= key_len)
+ k = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(arc4_setkey);
+
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+ u32 *const S = ctx->S;
+ u32 x, y, a, b;
+ u32 ty, ta, tb;
+
+ if (len == 0)
+ return;
+
+ x = ctx->x;
+ y = ctx->y;
+
+ a = S[x];
+ y = (y + a) & 0xff;
+ b = S[y];
+
+ do {
+ S[y] = a;
+ a = (a + b) & 0xff;
+ S[x] = b;
+ x = (x + 1) & 0xff;
+ ta = S[x];
+ ty = (y + ta) & 0xff;
+ tb = S[ty];
+ *out++ = *in++ ^ S[a];
+ if (--len == 0)
+ break;
+ y = ty;
+ a = ta;
+ b = tb;
+ } while (true);
+
+ ctx->x = x;
+ ctx->y = y;
+}
+EXPORT_SYMBOL(arc4_crypt);
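+
+/*
+ * Illustrative use only: ARC4 is a stream cipher, so arc4_crypt() performs
+ * both encryption and decryption. A minimal sketch, with 'key' and 'buf' as
+ * hypothetical caller-provided buffers:
+ *
+ *	struct arc4_ctx ctx;
+ *
+ *	arc4_setkey(&ctx, key, key_len);
+ *	arc4_crypt(&ctx, buf, buf, buf_len);	(in place: out may equal in)
+ *
+ * Note that the context is stateful: decryption requires a freshly keyed
+ * context processing the data in the same order as encryption.
+ */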
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
new file mode 100644
index 000000000000..3b6dcfdd9628
--- /dev/null
+++ b/lib/crypto/blake2s-generic.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ *
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <asm/unaligned.h>
+
+static const u8 blake2s_sigma[10][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+};
+
+static inline void blake2s_increment_counter(struct blake2s_state *state,
+ const u32 inc)
+{
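+	/* 64-bit byte counter in two u32 words; the comparison detects the carry */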
+ state->t[0] += inc;
+ state->t[1] += (state->t[0] < inc);
+}
+
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc)
+ __weak __alias(blake2s_compress_generic);
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc)
+{
+ u32 m[16];
+ u32 v[16];
+ int i;
+
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
+
+ while (nblocks > 0) {
+ blake2s_increment_counter(state, inc);
+ memcpy(m, block, BLAKE2S_BLOCK_SIZE);
+ le32_to_cpu_array(m, ARRAY_SIZE(m));
+ memcpy(v, state->h, 32);
+ v[ 8] = BLAKE2S_IV0;
+ v[ 9] = BLAKE2S_IV1;
+ v[10] = BLAKE2S_IV2;
+ v[11] = BLAKE2S_IV3;
+ v[12] = BLAKE2S_IV4 ^ state->t[0];
+ v[13] = BLAKE2S_IV5 ^ state->t[1];
+ v[14] = BLAKE2S_IV6 ^ state->f[0];
+ v[15] = BLAKE2S_IV7 ^ state->f[1];
+
+#define G(r, i, a, b, c, d) do { \
+ a += b + m[blake2s_sigma[r][2 * i + 0]]; \
+ d = ror32(d ^ a, 16); \
+ c += d; \
+ b = ror32(b ^ c, 12); \
+ a += b + m[blake2s_sigma[r][2 * i + 1]]; \
+ d = ror32(d ^ a, 8); \
+ c += d; \
+ b = ror32(b ^ c, 7); \
+} while (0)
+
+#define ROUND(r) do { \
+ G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
+ G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
+ G(r, 2, v[2], v[ 6], v[10], v[14]); \
+ G(r, 3, v[3], v[ 7], v[11], v[15]); \
+ G(r, 4, v[0], v[ 5], v[10], v[15]); \
+ G(r, 5, v[1], v[ 6], v[11], v[12]); \
+ G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
+ G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
+} while (0)
+ ROUND(0);
+ ROUND(1);
+ ROUND(2);
+ ROUND(3);
+ ROUND(4);
+ ROUND(5);
+ ROUND(6);
+ ROUND(7);
+ ROUND(8);
+ ROUND(9);
+
+#undef G
+#undef ROUND
+
+ for (i = 0; i < 8; ++i)
+ state->h[i] ^= v[i] ^ v[i + 8];
+
+ block += BLAKE2S_BLOCK_SIZE;
+ --nblocks;
+ }
+}
+EXPORT_SYMBOL(blake2s_compress_generic);
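+
+/*
+ * Illustrative use only: this file provides just the compression function.
+ * Callers normally go through the one-shot and incremental helpers built on
+ * top of it (see blake2s.c and the call pattern in blake2s-selftest.c), e.g.
+ *
+ *	struct blake2s_state state;
+ *	u8 hash[BLAKE2S_HASH_SIZE];
+ *
+ *	blake2s_init(&state, sizeof(hash));
+ *	blake2s_update(&state, data, data_len);
+ *	blake2s_final(&state, hash);
+ *
+ * with 'data' and 'data_len' as hypothetical caller-provided values.
+ */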
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
new file mode 100644
index 000000000000..d0634ed6a937
--- /dev/null
+++ b/lib/crypto/blake2s-selftest.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+/*
+ * blake2s_testvecs[] generated with the program below (using libb2-dev and
+ * libssl-dev [OpenSSL])
+ *
+ * #include <blake2.h>
+ * #include <stdint.h>
+ * #include <stdio.h>
+ *
+ * #include <openssl/evp.h>
+ *
+ * #define BLAKE2S_TESTVEC_COUNT 256
+ *
+ * static void print_vec(const uint8_t vec[], int len)
+ * {
+ * int i;
+ *
+ * printf(" { ");
+ * for (i = 0; i < len; i++) {
+ * if (i && (i % 12) == 0)
+ * printf("\n ");
+ * printf("0x%02x, ", vec[i]);
+ * }
+ * printf("},\n");
+ * }
+ *
+ * int main(void)
+ * {
+ * uint8_t key[BLAKE2S_KEYBYTES];
+ * uint8_t buf[BLAKE2S_TESTVEC_COUNT];
+ * uint8_t hash[BLAKE2S_OUTBYTES];
+ * int i, j;
+ *
+ * key[0] = key[1] = 1;
+ * for (i = 2; i < BLAKE2S_KEYBYTES; ++i)
+ * key[i] = key[i - 2] + key[i - 1];
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i)
+ * buf[i] = (uint8_t)i;
+ *
+ * printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) {
+ * int outlen = 1 + i % BLAKE2S_OUTBYTES;
+ * int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1);
+ *
+ * blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i,
+ * keylen);
+ * print_vec(hash, outlen);
+ * }
+ * printf("};\n\n");
+ *
+ * return 0;
+ *}
+ */
+static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
+ { 0xa1, },
+ { 0x7c, 0x89, },
+ { 0x74, 0x0e, 0xd4, },
+ { 0x47, 0x0c, 0x21, 0x15, },
+ { 0x18, 0xd6, 0x9c, 0xa6, 0xc4, },
+ { 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, },
+ { 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, },
+ { 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, },
+ { 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, },
+ { 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, },
+ { 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, },
+ { 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, },
+ { 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda,
+ 0xb7, },
+ { 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25,
+ 0x52, 0x3e, },
+ { 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc,
+ 0x57, 0xe2, 0x27, },
+ { 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6,
+ 0x47, 0x2a, 0xc7, 0xf5, },
+ { 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43,
+ 0xe7, 0x72, 0x08, 0xec, 0xbe, },
+ { 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96,
+ 0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, },
+ { 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e,
+ 0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, },
+ { 0x74, 0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d,
+ 0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, },
+ { 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86,
+ 0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, },
+ { 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41,
+ 0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, },
+ { 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22,
+ 0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, },
+ { 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00,
+ 0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, },
+ { 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9,
+ 0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13,
+ 0xd1, },
+ { 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05,
+ 0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07,
+ 0x01, 0x3e, },
+ { 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74,
+ 0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b,
+ 0xec, 0x13, 0xed, },
+ { 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7,
+ 0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37,
+ 0x72, 0x67, 0x09, 0xfc, },
+ { 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38,
+ 0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c,
+ 0x95, 0x29, 0x19, 0xf2, 0x4b, },
+ { 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22,
+ 0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30,
+ 0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, },
+ { 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e,
+ 0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd,
+ 0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, },
+ { 0xdd, 0x35, 0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe,
+ 0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61,
+ 0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, },
+ { 0x0a, },
+ { 0x6e, 0xd4, },
+ { 0x64, 0xe9, 0xd1, },
+ { 0x30, 0xdd, 0x71, 0xef, },
+ { 0x11, 0xb5, 0x0c, 0x87, 0xc9, },
+ { 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, },
+ { 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, },
+ { 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, },
+ { 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, },
+ { 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, },
+ { 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, },
+ { 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, },
+ { 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf,
+ 0xe2, },
+ { 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64,
+ 0x7e, 0xb0, },
+ { 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51,
+ 0x98, 0x0a, 0x8d, },
+ { 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04,
+ 0xf1, 0xc3, 0x94, 0xd8, },
+ { 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7,
+ 0x2c, 0xe8, 0x8f, 0xc7, 0x34, },
+ { 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41,
+ 0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, },
+ { 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81,
+ 0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, },
+ { 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4,
+ 0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, },
+ { 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a,
+ 0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, },
+ { 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb,
+ 0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, },
+ { 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9,
+ 0x21, 0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, },
+ { 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9,
+ 0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, },
+ { 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e,
+ 0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42,
+ 0xe3, },
+ { 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9,
+ 0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e,
+ 0xe3, 0x90, },
+ { 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3,
+ 0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10,
+ 0x58, 0x06, 0x9a, },
+ { 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5,
+ 0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d,
+ 0x53, 0x03, 0x1a, 0x39, },
+ { 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0,
+ 0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5,
+ 0xd4, 0x36, 0xb1, 0xac, 0x73, },
+ { 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59,
+ 0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90,
+ 0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, },
+ { 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28,
+ 0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75,
+ 0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, },
+ { 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6,
+ 0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77,
+ 0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, },
+ { 0x9d, },
+ { 0x9d, 0x7d, },
+ { 0xfd, 0xc3, 0xda, },
+ { 0xe8, 0x82, 0xcd, 0x21, },
+ { 0xc3, 0x1d, 0x42, 0x4c, 0x74, },
+ { 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, },
+ { 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, },
+ { 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, },
+ { 0x59, 0xe4, 0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, },
+ { 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, },
+ { 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, },
+ { 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, },
+ { 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3,
+ 0x63, },
+ { 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f,
+ 0xe6, 0xa9, },
+ { 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a,
+ 0xf6, 0x0e, 0x20, },
+ { 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39,
+ 0x18, 0x3e, 0xc7, 0xc3, },
+ { 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c,
+ 0x92, 0xfc, 0x7f, 0x34, 0x41, },
+ { 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb,
+ 0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, },
+ { 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96,
+ 0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, },
+ { 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6,
+ 0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, },
+ { 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba,
+ 0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, },
+ { 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0,
+ 0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, },
+ { 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d,
+ 0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, },
+ { 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59,
+ 0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, },
+ { 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89,
+ 0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e,
+ 0xaf, },
+ { 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc,
+ 0x97, 0x6f, 0xd9, 0xa1, 0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7,
+ 0xb1, 0x1d, },
+ { 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9,
+ 0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf,
+ 0xee, 0x6a, 0xbe, },
+ { 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8,
+ 0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd,
+ 0x5c, 0xef, 0xa3, 0x25, },
+ { 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6,
+ 0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8,
+ 0x4b, 0x0e, 0xe3, 0x94, 0x9f, },
+ { 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc,
+ 0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf,
+ 0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, },
+ { 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76,
+ 0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46,
+ 0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, },
+ { 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02,
+ 0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3,
+ 0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, },
+ { 0x02, },
+ { 0x52, 0xa8, },
+ { 0x38, 0x25, 0x0d, },
+ { 0xe3, 0x04, 0xd4, 0x92, },
+ { 0x97, 0xdb, 0xf7, 0x81, 0xca, },
+ { 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, },
+ { 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, },
+ { 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, },
+ { 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, },
+ { 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, },
+ { 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, },
+ { 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, },
+ { 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa,
+ 0xd3, },
+ { 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c,
+ 0xb7, 0x9a, },
+ { 0xe4, 0x7e, 0x72, 0xc6, 0x12, 0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31,
+ 0x3d, 0x47, 0x3d, },
+ { 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0,
+ 0x24, 0xf0, 0x17, 0xc0, },
+ { 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76,
+ 0xd2, 0xfd, 0x0c, 0x47, 0x40, },
+ { 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae,
+ 0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, },
+ { 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee,
+ 0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, },
+ { 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d,
+ 0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, },
+ { 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61,
+ 0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, },
+ { 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd,
+ 0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, },
+ { 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5,
+ 0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, },
+ { 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17,
+ 0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, },
+ { 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c,
+ 0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c,
+ 0xae, },
+ { 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5,
+ 0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d,
+ 0x59, 0x00, },
+ { 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e,
+ 0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c,
+ 0x5a, 0x2c, 0x3b, },
+ { 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda,
+ 0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c,
+ 0x10, 0x07, 0x2a, 0xc4, },
+ { 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 0x35, 0x8d, 0x14,
+ 0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf,
+ 0xee, 0xa1, 0x74, 0xd6, 0x71, },
+ { 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33,
+ 0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff,
+ 0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, },
+ { 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c,
+ 0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44,
+ 0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, },
+ { 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff,
+ 0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23,
+ 0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, },
+ { 0xbe, },
+ { 0x17, 0x6c, },
+ { 0x1a, 0x42, 0x4f, },
+ { 0xba, 0xaf, 0xb7, 0x65, },
+ { 0xc2, 0x63, 0x43, 0x6a, 0xea, },
+ { 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, },
+ { 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, },
+ { 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, },
+ { 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, },
+ { 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, },
+ { 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, },
+ { 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, },
+ { 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92,
+ 0xe9, },
+ { 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d,
+ 0xc4, 0xb3, },
+ { 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60,
+ 0xd7, 0xd3, 0x5e, },
+ { 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16,
+ 0xaf, 0x39, 0xbd, 0x92, },
+ { 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10,
+ 0xd9, 0xa7, 0x66, 0x27, 0xcf, },
+ { 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02,
+ 0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, },
+ { 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd,
+ 0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, },
+ { 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3,
+ 0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, },
+ { 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f,
+ 0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, },
+ { 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51,
+ 0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, },
+ { 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3,
+ 0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, },
+ { 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda,
+ 0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, },
+ { 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6,
+ 0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a,
+ 0x26, },
+ { 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24,
+ 0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35,
+ 0xf4, 0x1d, },
+ { 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9,
+ 0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46,
+ 0x4e, 0x29, 0x26, },
+ { 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09,
+ 0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98,
+ 0x1a, 0x5b, 0xff, 0xc9, },
+ { 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2,
+ 0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69,
+ 0xbf, 0xf6, 0xbe, 0x3c, 0x40, },
+ { 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31,
+ 0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20,
+ 0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, },
+ { 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4,
+ 0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30,
+ 0x5d, 0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, },
+ { 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0,
+ 0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5,
+ 0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, },
+ { 0x1f, },
+ { 0x82, 0x60, },
+ { 0xcc, 0xe3, 0x08, },
+ { 0x56, 0x17, 0xe4, 0x59, },
+ { 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, },
+ { 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, },
+ { 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, },
+ { 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, },
+ { 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, },
+ { 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, },
+ { 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, },
+ { 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, },
+ { 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc,
+ 0x5b, },
+ { 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0,
+ 0x90, 0x48, },
+ { 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60,
+ 0x04, 0xd9, 0xd5, },
+ { 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47,
+ 0xe5, 0x31, 0x06, 0x70, },
+ { 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c,
+ 0x70, 0x25, 0xdb, 0x33, 0x27, },
+ { 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a,
+ 0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, },
+ { 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3,
+ 0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, },
+ { 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7,
+ 0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, },
+ { 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac,
+ 0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, },
+ { 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98,
+ 0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, },
+ { 0xc8, 0xe8, 0xdd, 0xdf, 0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11,
+ 0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, },
+ { 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a,
+ 0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, },
+ { 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41,
+ 0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70,
+ 0x88, },
+ { 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4,
+ 0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3,
+ 0xc6, 0xbb, },
+ { 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55,
+ 0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5,
+ 0x55, 0xfd, 0x4f, },
+ { 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e,
+ 0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2,
+ 0x42, 0x64, 0x3b, 0x1a, },
+ { 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95,
+ 0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5,
+ 0x1d, 0x60, 0x8f, 0x8a, 0x1d, },
+ { 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4,
+ 0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d,
+ 0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, },
+ { 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b,
+ 0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3,
+ 0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, },
+ { 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf,
+ 0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f,
+ 0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, },
+ { 0x60, },
+ { 0x24, 0x26, },
+ { 0x47, 0xeb, 0xc9, },
+ { 0x4a, 0xd0, 0xbc, 0xf0, },
+ { 0x8e, 0x2b, 0xc9, 0x85, 0x3c, },
+ { 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, },
+ { 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, },
+ { 0xa2, 0x86, 0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, },
+ { 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, },
+ { 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, },
+ { 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, },
+ { 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, },
+ { 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91,
+ 0x8d, },
+ { 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33,
+ 0xbf, 0xa0, },
+ { 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c,
+ 0xd5, 0x4b, 0xed, },
+ { 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44,
+ 0xc8, 0x24, 0x0a, 0xb7, },
+ { 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75,
+ 0xb2, 0x15, 0xd1, 0xb1, 0x10, },
+ { 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35,
+ 0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, },
+ { 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9,
+ 0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, },
+ { 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47,
+ 0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, },
+ { 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35,
+ 0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, },
+ { 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1,
+ 0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, },
+ { 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21,
+ 0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, },
+ { 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4,
+ 0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, },
+ { 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7,
+ 0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7,
+ 0xd3, },
+ { 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 0xf5, 0xcc, 0x7a, 0x7f, 0xfb,
+ 0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16,
+ 0xa6, 0xd6, },
+ { 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80,
+ 0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88,
+ 0x55, 0xd1, 0xa9, },
+ { 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54,
+ 0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46,
+ 0xef, 0xb0, 0x00, 0x8d, },
+ { 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8,
+ 0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94,
+ 0x1c, 0x9f, 0x8b, 0xf3, 0xcb, },
+ { 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4,
+ 0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67,
+ 0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, },
+ { 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02,
+ 0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04,
+ 0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, },
+ { 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28,
+ 0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca,
+ 0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, },
+ { 0x7e, },
+ { 0x1e, 0x21, },
+ { 0x26, 0xd3, 0xdd, },
+ { 0x2c, 0xd4, 0xb3, 0x3d, },
+ { 0x86, 0x7b, 0x76, 0x3c, 0xf0, },
+ { 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, },
+ { 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, },
+ { 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, },
+ { 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, },
+ { 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, },
+ { 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, },
+ { 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, },
+ { 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77,
+ 0x66, },
+ { 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31,
+ 0x55, 0x66, },
+ { 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12,
+ 0x43, 0xbf, 0xa1, },
+ { 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e,
+ 0x95, 0x8a, 0xc5, 0xed, },
+ { 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef,
+ 0xf6, 0x2b, 0x3a, 0xba, 0x60, },
+ { 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2,
+ 0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, },
+ { 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b,
+ 0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, },
+ { 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94,
+ 0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, },
+ { 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b,
+ 0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, },
+ { 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf,
+ 0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, },
+ { 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8,
+ 0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, },
+ { 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea,
+ 0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, },
+ { 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2,
+ 0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0,
+ 0xd7, },
+ { 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b,
+ 0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd,
+ 0xb6, 0xef, },
+ { 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6,
+ 0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57,
+ 0xef, 0xf8, 0x53, },
+ { 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46,
+ 0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89,
+ 0x89, 0xfd, 0xf7, 0x99, },
+ { 0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03,
+ 0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a,
+ 0xa9, 0x8f, 0x2b, 0x59, 0xfb, },
+ { 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb,
+ 0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d,
+ 0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, },
+ { 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0,
+ 0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d,
+ 0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, },
+ { 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c,
+ 0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92,
+ 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
+};
+
+static bool __init noinline_for_stack blake2s_digest_test(void)
+{
+ u8 key[BLAKE2S_KEY_SIZE];
+ u8 buf[ARRAY_SIZE(blake2s_testvecs)];
+ u8 hash[BLAKE2S_HASH_SIZE];
+ struct blake2s_state state;
+ bool success = true;
+ int i, l;
+
+ key[0] = key[1] = 1;
+ for (i = 2; i < sizeof(key); ++i)
+ key[i] = key[i - 2] + key[i - 1];
+
+ for (i = 0; i < sizeof(buf); ++i)
+ buf[i] = (u8)i;
+
+ for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) {
+ int outlen = 1 + i % BLAKE2S_HASH_SIZE;
+ int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1);
+
+ blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i,
+ keylen);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s self-test %d: FAIL\n", i + 1);
+ success = false;
+ }
+
+ if (!keylen)
+ blake2s_init(&state, outlen);
+ else
+ blake2s_init_key(&state, outlen,
+ key + BLAKE2S_KEY_SIZE - keylen,
+ keylen);
+
+ blake2s_update(&state, buf, l);
+ blake2s_update(&state, buf + l, i - l);
+ blake2s_final(&state, hash);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s init/update/final self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ return success;
+}
+
+static bool __init noinline_for_stack blake2s_random_test(void)
+{
+ struct blake2s_state state;
+ bool success = true;
+ int i, l;
+
+ for (i = 0; i < 32; ++i) {
+ enum { TEST_ALIGNMENT = 16 };
+ u8 blocks[BLAKE2S_BLOCK_SIZE * 2 + TEST_ALIGNMENT - 1]
+ __aligned(TEST_ALIGNMENT);
+ u8 *unaligned_block = blocks + BLAKE2S_BLOCK_SIZE;
+ struct blake2s_state state1, state2;
+
+ get_random_bytes(blocks, sizeof(blocks));
+ get_random_bytes(&state, sizeof(state));
+
+#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
+ defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+ memcpy(&state1, &state, sizeof(state1));
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+#endif
+
+ memcpy(&state1, &state, sizeof(state1));
+ blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
+ for (l = 1; l < TEST_ALIGNMENT; ++l) {
+ memcpy(unaligned_block + l, blocks,
+ BLAKE2S_BLOCK_SIZE);
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state2, unaligned_block + l, 1,
+ BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress align %d self-test %d: FAIL\n",
+ l, i + 1);
+ success = false;
+ }
+ }
+ }
+
+ return success;
+}
+
+bool __init blake2s_selftest(void)
+{
+ bool success;
+
+ success = blake2s_digest_test();
+ success &= blake2s_random_test();
+
+ return success;
+}
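The digest test above drives the one-shot blake2s() helper with every output length from 1 to BLAKE2S_HASH_SIZE and every key length from 0 to BLAKE2S_KEY_SIZE, in the argument order (out, in, key, outlen, inlen, keylen). A minimal caller would look like the sketch below; the example_ helper name is illustrative only and assumes the public <crypto/blake2s.h> header.

#include <crypto/blake2s.h>

/* Sketch: keyed, truncated digest, mirroring what the self-test exercises. */
static void example_blake2s_oneshot(const u8 *msg, size_t msglen,
				    const u8 key[BLAKE2S_KEY_SIZE])
{
	u8 digest[BLAKE2S_HASH_SIZE];

	/* 16-byte keyed digest of msg; outlen may be anything from 1 to 32. */
	blake2s(digest, msg, key, 16, msglen, BLAKE2S_KEY_SIZE);
}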
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
new file mode 100644
index 000000000000..71a316552cc5
--- /dev/null
+++ b/lib/crypto/blake2s.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ *
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
+{
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+EXPORT_SYMBOL(blake2s_update);
+
+void blake2s_final(struct blake2s_state *state, u8 *out)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && !out);
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
+ memzero_explicit(state, sizeof(*state));
+}
+EXPORT_SYMBOL(blake2s_final);
+
+static int __init blake2s_mod_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!blake2s_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+module_init(blake2s_mod_init);
+MODULE_DESCRIPTION("BLAKE2s hash function");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
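blake2s_update() above buffers trailing bytes in state->buf and defers the last block until more data arrives or blake2s_final() is called, so callers may split their input arbitrarily across calls; blake2s_final() pads that block, writes outlen bytes and wipes the state. A hedged usage sketch (the helper name is illustrative only):

#include <crypto/blake2s.h>

static void example_blake2s_stream(const u8 *part1, size_t len1,
				   const u8 *part2, size_t len2,
				   u8 digest[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE);
	blake2s_update(&state, part1, len1);	/* split point is arbitrary */
	blake2s_update(&state, part2, len2);
	blake2s_final(&state, digest);		/* also zeroizes the state */
}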
diff --git a/lib/chacha.c b/lib/crypto/chacha.c
index c7c9826564d3..b748fd3d256e 100644
--- a/lib/chacha.c
+++ b/lib/crypto/chacha.c
@@ -5,10 +5,11 @@
* Copyright (C) 2015 Martin Willi
*/
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
-#include <linux/cryptohash.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include <crypto/chacha.h>
@@ -63,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds)
}
/**
- * chacha_block - generate one keystream block and increment block counter
+ * chacha_block_generic - generate one keystream block and increment block counter
* @state: input state matrix (16 32-bit words)
* @stream: output keystream block (64 bytes)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
@@ -72,7 +73,7 @@ static void chacha_permute(u32 *x, int nrounds)
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
-void chacha_block(u32 *state, u8 *stream, int nrounds)
+void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
{
u32 x[16];
int i;
@@ -86,12 +87,12 @@ void chacha_block(u32 *state, u8 *stream, int nrounds)
state[12]++;
}
-EXPORT_SYMBOL(chacha_block);
+EXPORT_SYMBOL(chacha_block_generic);
/**
- * hchacha_block - abbreviated ChaCha core, for XChaCha
- * @in: input state matrix (16 32-bit words)
- * @out: output (8 32-bit words)
+ * hchacha_block_generic - abbreviated ChaCha core, for XChaCha
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output (8 32-bit words)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
@@ -99,15 +100,15 @@ EXPORT_SYMBOL(chacha_block);
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
-void hchacha_block(const u32 *in, u32 *out, int nrounds)
+void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
{
u32 x[16];
- memcpy(x, in, 64);
+ memcpy(x, state, 64);
chacha_permute(x, nrounds);
- memcpy(&out[0], &x[0], 16);
- memcpy(&out[4], &x[12], 16);
+ memcpy(&stream[0], &x[0], 16);
+ memcpy(&stream[4], &x[12], 16);
}
-EXPORT_SYMBOL(hchacha_block);
+EXPORT_SYMBOL(hchacha_block_generic);
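hchacha_block_generic() above is the building block for XChaCha: the caller loads the usual ChaCha state (the four "expand 32-byte k" constants, the 256-bit key, then the first 16 bytes of the extended nonce) and gets eight subkey words back. The sketch below follows that standard construction; the example_ helper name and the hand-rolled state setup are illustrative, not part of this patch.

#include <asm/unaligned.h>
#include <crypto/chacha.h>

/* Derive an XChaCha subkey from a 256-bit key and the first 16 nonce bytes. */
static void example_hchacha_subkey(const u8 key[32], const u8 nonce16[16],
				   u32 subkey[8])
{
	u32 state[16];
	int i;

	state[0] = 0x61707865U;		/* "expa" */
	state[1] = 0x3320646eU;		/* "nd 3" */
	state[2] = 0x79622d32U;		/* "2-by" */
	state[3] = 0x6b206574U;		/* "te k" */
	for (i = 0; i < 8; ++i)
		state[4 + i] = get_unaligned_le32(key + 4 * i);
	for (i = 0; i < 4; ++i)
		state[12 + i] = get_unaligned_le32(nonce16 + 4 * i);

	hchacha_block_generic(state, subkey, 20);
}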
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
new file mode 100644
index 000000000000..fa43deda2660
--- /dev/null
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -0,0 +1,9082 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
+
+#include <asm/unaligned.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct chacha20poly1305_testvec {
+ const u8 *input, *output, *assoc, *nonce, *key;
+ size_t ilen, alen, nlen;
+ bool failure;
+};
+
+/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539
+ * 2.8.2. After those come vectors generated by reference implementations, and
+ * the final marked ones are taken from wycheproof. We only run those on the
+ * encrypt side, because we are mostly stressing the primitives rather than
+ * the actual chapoly construction.
+ */
+
+static const u8 enc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 enc_output001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 enc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 enc_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 enc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
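For illustration, one such group of arrays maps onto struct chacha20poly1305_testvec as in the sketch below; example_vec001 is a hypothetical name, and the real table gathering all of the vectors presumably follows further down the file.

/* Hypothetical wiring of vector 001 into the test-vector struct. */
static const struct chacha20poly1305_testvec example_vec001 __initconst = {
	.input	= enc_input001,	.ilen = sizeof(enc_input001),
	.output	= enc_output001,
	.assoc	= enc_assoc001,	.alen = sizeof(enc_assoc001),
	.nonce	= enc_nonce001,	.nlen = sizeof(enc_nonce001),
	.key	= enc_key001,
};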
+
+static const u8 enc_input002[] __initconst = { };
+static const u8 enc_output002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
+static const u8 enc_assoc002[] __initconst = { };
+static const u8 enc_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 enc_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 enc_input003[] __initconst = { };
+static const u8 enc_output003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 enc_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 enc_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 enc_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 enc_input004[] __initconst = {
+ 0xa4
+};
+static const u8 enc_output004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 enc_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 enc_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 enc_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 enc_input005[] __initconst = {
+ 0x2d
+};
+static const u8 enc_output005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 enc_assoc005[] __initconst = { };
+static const u8 enc_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 enc_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 enc_input006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 enc_output006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 enc_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 enc_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 enc_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 enc_input007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 enc_output007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 enc_assoc007[] __initconst = { };
+static const u8 enc_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 enc_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 enc_input008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 enc_output008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 enc_assoc008[] __initconst = { };
+static const u8 enc_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 enc_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 enc_input009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 enc_output009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 enc_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 enc_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 enc_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 enc_input010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 enc_output010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 enc_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 enc_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 enc_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 enc_input011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 enc_output011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 enc_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 enc_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 enc_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 enc_input012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 enc_output012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 enc_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 enc_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 enc_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+/* wycheproof - rfc7539 */
+static const u8 enc_input013[] __initconst = {
+ 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61,
+ 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c,
+ 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39,
+ 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63,
+ 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66,
+ 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20,
+ 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75,
+ 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73,
+ 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f,
+ 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69,
+ 0x74, 0x2e
+};
+static const u8 enc_output013[] __initconst = {
+ 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb,
+ 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2,
+ 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe,
+ 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6,
+ 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12,
+ 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b,
+ 0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29,
+ 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36,
+ 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c,
+ 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58,
+ 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94,
+ 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc,
+ 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d,
+ 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b,
+ 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09,
+ 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60,
+ 0x06, 0x91
+};
+static const u8 enc_assoc013[] __initconst = {
+ 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7
+};
+static const u8 enc_nonce013[] __initconst = {
+ 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43,
+ 0x44, 0x45, 0x46, 0x47
+};
+static const u8 enc_key013[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input014[] __initconst = { };
+static const u8 enc_output014[] __initconst = {
+ 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5,
+ 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d
+};
+static const u8 enc_assoc014[] __initconst = { };
+static const u8 enc_nonce014[] __initconst = {
+ 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1,
+ 0xea, 0x12, 0x37, 0x9d
+};
+static const u8 enc_key014[] __initconst = {
+ 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96,
+ 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0,
+ 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08,
+ 0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input015[] __initconst = { };
+static const u8 enc_output015[] __initconst = {
+ 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b,
+ 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09
+};
+static const u8 enc_assoc015[] __initconst = {
+ 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10
+};
+static const u8 enc_nonce015[] __initconst = {
+ 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16,
+ 0xa3, 0xc6, 0xf6, 0x89
+};
+static const u8 enc_key015[] __initconst = {
+ 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb,
+ 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22,
+ 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63,
+ 0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42
+};
+
+/* wycheproof - misc */
+static const u8 enc_input016[] __initconst = {
+ 0x2a
+};
+static const u8 enc_output016[] __initconst = {
+ 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80,
+ 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75,
+ 0x22
+};
+static const u8 enc_assoc016[] __initconst = { };
+static const u8 enc_nonce016[] __initconst = {
+ 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd,
+ 0xee, 0xab, 0x60, 0xf1
+};
+static const u8 enc_key016[] __initconst = {
+ 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50,
+ 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa,
+ 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a,
+ 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73
+};
+
+/* wycheproof - misc */
+static const u8 enc_input017[] __initconst = {
+ 0x51
+};
+static const u8 enc_output017[] __initconst = {
+ 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7,
+ 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72,
+ 0xb9
+};
+static const u8 enc_assoc017[] __initconst = {
+ 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53
+};
+static const u8 enc_nonce017[] __initconst = {
+ 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2,
+ 0x78, 0x2f, 0x44, 0x03
+};
+static const u8 enc_key017[] __initconst = {
+ 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5,
+ 0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f,
+ 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5,
+ 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee
+};
+
+/* wycheproof - misc */
+static const u8 enc_input018[] __initconst = {
+ 0x5c, 0x60
+};
+static const u8 enc_output018[] __initconst = {
+ 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39,
+ 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22,
+ 0xe5, 0xe2
+};
+static const u8 enc_assoc018[] __initconst = { };
+static const u8 enc_nonce018[] __initconst = {
+ 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34,
+ 0x7e, 0x03, 0xf2, 0xdb
+};
+static const u8 enc_key018[] __initconst = {
+ 0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89,
+ 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e,
+ 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f,
+ 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00
+};
+
+/* wycheproof - misc */
+static const u8 enc_input019[] __initconst = {
+ 0xdd, 0xf2
+};
+static const u8 enc_output019[] __initconst = {
+ 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec,
+ 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24,
+ 0x37, 0xa2
+};
+static const u8 enc_assoc019[] __initconst = {
+ 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf
+};
+static const u8 enc_nonce019[] __initconst = {
+ 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90,
+ 0xb6, 0x04, 0x0a, 0xc6
+};
+static const u8 enc_key019[] __initconst = {
+ 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48,
+ 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff,
+ 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44,
+ 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58
+};
+
+/* wycheproof - misc */
+static const u8 enc_input020[] __initconst = {
+ 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31
+};
+static const u8 enc_output020[] __initconst = {
+ 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed,
+ 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28,
+ 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42
+};
+static const u8 enc_assoc020[] __initconst = { };
+static const u8 enc_nonce020[] __initconst = {
+ 0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59,
+ 0x6d, 0xc5, 0x5b, 0xb7
+};
+static const u8 enc_key020[] __initconst = {
+ 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f,
+ 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f,
+ 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3,
+ 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input021[] __initconst = {
+ 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90
+};
+static const u8 enc_output021[] __initconst = {
+ 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18,
+ 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5,
+ 0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba
+};
+static const u8 enc_assoc021[] __initconst = {
+ 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53
+};
+static const u8 enc_nonce021[] __initconst = {
+ 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98,
+ 0xde, 0x94, 0x83, 0x96
+};
+static const u8 enc_key021[] __initconst = {
+ 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5,
+ 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4,
+ 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda,
+ 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input022[] __initconst = {
+ 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed
+};
+static const u8 enc_output022[] __initconst = {
+ 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17,
+ 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93,
+ 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21
+};
+static const u8 enc_assoc022[] __initconst = { };
+static const u8 enc_nonce022[] __initconst = {
+ 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2,
+ 0x36, 0x18, 0x23, 0xd3
+};
+static const u8 enc_key022[] __initconst = {
+ 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf,
+ 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d,
+ 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7,
+ 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52
+};
+
+/* wycheproof - misc */
+static const u8 enc_input023[] __initconst = {
+ 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf
+};
+static const u8 enc_output023[] __initconst = {
+ 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0,
+ 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0,
+ 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18
+};
+static const u8 enc_assoc023[] __initconst = {
+ 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d
+};
+static const u8 enc_nonce023[] __initconst = {
+ 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33,
+ 0x66, 0x48, 0x4a, 0x78
+};
+static const u8 enc_key023[] __initconst = {
+ 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54,
+ 0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a,
+ 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe,
+ 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd
+};
+
+/* wycheproof - misc */
+static const u8 enc_input024[] __initconst = {
+ 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c,
+ 0x36, 0x8d, 0x14, 0xe0
+};
+static const u8 enc_output024[] __initconst = {
+ 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a,
+ 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27,
+ 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10,
+ 0x6d, 0xcb, 0x29, 0xb4
+};
+static const u8 enc_assoc024[] __initconst = { };
+static const u8 enc_nonce024[] __initconst = {
+ 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e,
+ 0x07, 0x53, 0x86, 0x56
+};
+static const u8 enc_key024[] __initconst = {
+ 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0,
+ 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39,
+ 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5,
+ 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe
+};
+
+/* wycheproof - misc */
+static const u8 enc_input025[] __initconst = {
+ 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7,
+ 0x59, 0xb1, 0xa0, 0xda
+};
+static const u8 enc_output025[] __initconst = {
+ 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38,
+ 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c,
+ 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7,
+ 0xcf, 0x05, 0x07, 0x2f
+};
+static const u8 enc_assoc025[] __initconst = {
+ 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9
+};
+static const u8 enc_nonce025[] __initconst = {
+ 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d,
+ 0xd9, 0x06, 0xe9, 0xce
+};
+static const u8 enc_key025[] __initconst = {
+ 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40,
+ 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70,
+ 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e,
+ 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input026[] __initconst = {
+ 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac,
+ 0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07
+};
+static const u8 enc_output026[] __initconst = {
+ 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf,
+ 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a,
+ 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79,
+ 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2
+};
+static const u8 enc_assoc026[] __initconst = { };
+static const u8 enc_nonce026[] __initconst = {
+ 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda,
+ 0xd4, 0x61, 0xd2, 0x3c
+};
+static const u8 enc_key026[] __initconst = {
+ 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda,
+ 0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77,
+ 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0,
+ 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb
+};
+
+/* wycheproof - misc */
+static const u8 enc_input027[] __initconst = {
+ 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f,
+ 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73
+};
+static const u8 enc_output027[] __initconst = {
+ 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81,
+ 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe,
+ 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9,
+ 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17
+};
+static const u8 enc_assoc027[] __initconst = {
+ 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12
+};
+static const u8 enc_nonce027[] __initconst = {
+ 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14,
+ 0xc5, 0x03, 0x5b, 0x6a
+};
+static const u8 enc_key027[] __initconst = {
+ 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f,
+ 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38,
+ 0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b,
+ 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d
+};
+
+/* wycheproof - misc */
+static const u8 enc_input028[] __initconst = {
+ 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0,
+ 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88
+};
+static const u8 enc_output028[] __initconst = {
+ 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4,
+ 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13,
+ 0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a,
+ 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c
+};
+static const u8 enc_assoc028[] __initconst = { };
+static const u8 enc_nonce028[] __initconst = {
+ 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8,
+ 0x47, 0x40, 0xad, 0x9b
+};
+static const u8 enc_key028[] __initconst = {
+ 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7,
+ 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7,
+ 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63,
+ 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a
+};
+
+/* wycheproof - misc */
+static const u8 enc_input029[] __initconst = {
+ 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09,
+ 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6
+};
+static const u8 enc_output029[] __initconst = {
+ 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3,
+ 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c,
+ 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc,
+ 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d
+};
+static const u8 enc_assoc029[] __initconst = {
+ 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff
+};
+static const u8 enc_nonce029[] __initconst = {
+ 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80,
+ 0x07, 0x1f, 0x52, 0x66
+};
+static const u8 enc_key029[] __initconst = {
+ 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8,
+ 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14,
+ 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d,
+ 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input030[] __initconst = {
+ 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15,
+ 0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e,
+ 0x1f
+};
+static const u8 enc_output030[] __initconst = {
+ 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd,
+ 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74,
+ 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80,
+ 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08,
+ 0x13
+};
+static const u8 enc_assoc030[] __initconst = { };
+static const u8 enc_nonce030[] __initconst = {
+ 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42,
+ 0xdc, 0x03, 0x44, 0x5d
+};
+static const u8 enc_key030[] __initconst = {
+ 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21,
+ 0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e,
+ 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b,
+ 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11
+};
+
+/* wycheproof - misc */
+static const u8 enc_input031[] __initconst = {
+ 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88,
+ 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c,
+ 0x9c
+};
+static const u8 enc_output031[] __initconst = {
+ 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b,
+ 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e,
+ 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38,
+ 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c,
+ 0xeb
+};
+static const u8 enc_assoc031[] __initconst = {
+ 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79
+};
+static const u8 enc_nonce031[] __initconst = {
+ 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10,
+ 0x63, 0x3d, 0x99, 0x3d
+};
+static const u8 enc_key031[] __initconst = {
+ 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7,
+ 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f,
+ 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82,
+ 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70
+};
+
+/* wycheproof - misc */
+static const u8 enc_input032[] __initconst = {
+ 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66,
+ 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7,
+ 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11
+};
+static const u8 enc_output032[] __initconst = {
+ 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d,
+ 0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0,
+ 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64,
+ 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d,
+ 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a
+};
+static const u8 enc_assoc032[] __initconst = { };
+static const u8 enc_nonce032[] __initconst = {
+ 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76,
+ 0x2c, 0x65, 0xf3, 0x1b
+};
+static const u8 enc_key032[] __initconst = {
+ 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87,
+ 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f,
+ 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2,
+ 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input033[] __initconst = {
+ 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d,
+ 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c,
+ 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93
+};
+static const u8 enc_output033[] __initconst = {
+ 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69,
+ 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4,
+ 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00,
+ 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63,
+ 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65
+};
+static const u8 enc_assoc033[] __initconst = {
+ 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08
+};
+static const u8 enc_nonce033[] __initconst = {
+ 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd,
+ 0x78, 0x34, 0xed, 0x55
+};
+static const u8 enc_key033[] __initconst = {
+ 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed,
+ 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda,
+ 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d,
+ 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input034[] __initconst = {
+ 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f,
+ 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c,
+ 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26,
+ 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29
+};
+static const u8 enc_output034[] __initconst = {
+ 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab,
+ 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb,
+ 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1,
+ 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56,
+ 0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f,
+ 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93
+};
+static const u8 enc_assoc034[] __initconst = { };
+static const u8 enc_nonce034[] __initconst = {
+ 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31,
+ 0x37, 0x1a, 0x6f, 0xd2
+};
+static const u8 enc_key034[] __initconst = {
+ 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e,
+ 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2,
+ 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15,
+ 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71
+};
+
+/* wycheproof - misc */
+static const u8 enc_input035[] __initconst = {
+ 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2,
+ 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f,
+ 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56,
+ 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb
+};
+static const u8 enc_output035[] __initconst = {
+ 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20,
+ 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5,
+ 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e,
+ 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53,
+ 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d,
+ 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f
+};
+static const u8 enc_assoc035[] __initconst = {
+ 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48
+};
+static const u8 enc_nonce035[] __initconst = {
+ 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8,
+ 0xb8, 0x1a, 0x1f, 0x8b
+};
+static const u8 enc_key035[] __initconst = {
+ 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f,
+ 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40,
+ 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4,
+ 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4
+};
+
+/* wycheproof - misc */
+static const u8 enc_input036[] __initconst = {
+ 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a,
+ 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb,
+ 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce,
+ 0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78
+};
+static const u8 enc_output036[] __initconst = {
+ 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76,
+ 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e,
+ 0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50,
+ 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21,
+ 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33,
+ 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea
+};
+static const u8 enc_assoc036[] __initconst = { };
+static const u8 enc_nonce036[] __initconst = {
+ 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2,
+ 0x2b, 0x7e, 0x6e, 0x6a
+};
+static const u8 enc_key036[] __initconst = {
+ 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c,
+ 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb,
+ 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9,
+ 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input037[] __initconst = {
+ 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14,
+ 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3,
+ 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79,
+ 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e
+};
+static const u8 enc_output037[] __initconst = {
+ 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b,
+ 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2,
+ 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29,
+ 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4,
+ 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d,
+ 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32
+};
+static const u8 enc_assoc037[] __initconst = {
+ 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59
+};
+static const u8 enc_nonce037[] __initconst = {
+ 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5,
+ 0x34, 0x0d, 0xd1, 0xb8
+};
+static const u8 enc_key037[] __initconst = {
+ 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4,
+ 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f,
+ 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd,
+ 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45
+};
+
+/* wycheproof - misc */
+static const u8 enc_input038[] __initconst = {
+ 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4,
+ 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e,
+ 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11,
+ 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04,
+ 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46,
+ 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a
+};
+static const u8 enc_output038[] __initconst = {
+ 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6,
+ 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00,
+ 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6,
+ 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27,
+ 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24,
+ 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f,
+ 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08,
+ 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9
+};
+static const u8 enc_assoc038[] __initconst = { };
+static const u8 enc_nonce038[] __initconst = {
+ 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9,
+ 0x0b, 0xef, 0x55, 0xd2
+};
+static const u8 enc_key038[] __initconst = {
+ 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51,
+ 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63,
+ 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3,
+ 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64
+};
+
+/* wycheproof - misc */
+static const u8 enc_input039[] __initconst = {
+ 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74,
+ 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3,
+ 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d,
+ 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59,
+ 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c,
+ 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9
+};
+static const u8 enc_output039[] __initconst = {
+ 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec,
+ 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04,
+ 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e,
+ 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d,
+ 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3,
+ 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4,
+ 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42,
+ 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3
+};
+static const u8 enc_assoc039[] __initconst = {
+ 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84
+};
+static const u8 enc_nonce039[] __initconst = {
+ 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b,
+ 0x31, 0xcd, 0x4d, 0x95
+};
+static const u8 enc_key039[] __initconst = {
+ 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c,
+ 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25,
+ 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4,
+ 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac
+};
+
+/* wycheproof - misc */
+static const u8 enc_input040[] __initconst = {
+ 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e,
+ 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd,
+ 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f,
+ 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f,
+ 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88,
+ 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76,
+ 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d,
+ 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82
+};
+static const u8 enc_output040[] __initconst = {
+ 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5,
+ 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8,
+ 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c,
+ 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8,
+ 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc,
+ 0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33,
+ 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd,
+ 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50,
+ 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56,
+ 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13
+};
+static const u8 enc_assoc040[] __initconst = { };
+static const u8 enc_nonce040[] __initconst = {
+ 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28,
+ 0x23, 0xcc, 0x06, 0x5b
+};
+static const u8 enc_key040[] __initconst = {
+ 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0,
+ 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8,
+ 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30,
+ 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input041[] __initconst = {
+ 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88,
+ 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d,
+ 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19,
+ 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44,
+ 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec,
+ 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d,
+ 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c,
+ 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17
+};
+static const u8 enc_output041[] __initconst = {
+ 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16,
+ 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1,
+ 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8,
+ 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97,
+ 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84,
+ 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3,
+ 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20,
+ 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e,
+ 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14,
+ 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec
+};
+static const u8 enc_assoc041[] __initconst = {
+ 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2
+};
+static const u8 enc_nonce041[] __initconst = {
+ 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d,
+ 0xe4, 0xeb, 0xb1, 0x9c
+};
+static const u8 enc_key041[] __initconst = {
+ 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9,
+ 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2,
+ 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5,
+ 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input042[] __initconst = {
+ 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba,
+ 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58,
+ 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06,
+ 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64,
+ 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5,
+ 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74,
+ 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61,
+ 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e,
+ 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6,
+ 0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 0xcd,
+ 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4,
+ 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5,
+ 0x92
+};
+static const u8 enc_output042[] __initconst = {
+ 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97,
+ 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf,
+ 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98,
+ 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5,
+ 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45,
+ 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10,
+ 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28,
+ 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe,
+ 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c,
+ 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a,
+ 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2,
+ 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3,
+ 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b,
+ 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b,
+ 0xb0
+};
+static const u8 enc_assoc042[] __initconst = { };
+static const u8 enc_nonce042[] __initconst = {
+ 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03,
+ 0xac, 0xde, 0x27, 0x99
+};
+static const u8 enc_key042[] __initconst = {
+ 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28,
+ 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d,
+ 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a,
+ 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input043[] __initconst = {
+ 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff,
+ 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc,
+ 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc,
+ 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5,
+ 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd,
+ 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55,
+ 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85,
+ 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b,
+ 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3,
+ 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad,
+ 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92,
+ 0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31,
+ 0x76
+};
+static const u8 enc_output043[] __initconst = {
+ 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5,
+ 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06,
+ 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e,
+ 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae,
+ 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37,
+ 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8,
+ 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3,
+ 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d,
+ 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70,
+ 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07,
+ 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6,
+ 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54,
+ 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5,
+ 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac,
+ 0xf6
+};
+static const u8 enc_assoc043[] __initconst = {
+ 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30
+};
+static const u8 enc_nonce043[] __initconst = {
+ 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63,
+ 0xe5, 0x22, 0x94, 0x60
+};
+static const u8 enc_key043[] __initconst = {
+ 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c,
+ 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4,
+ 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b,
+ 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input044[] __initconst = {
+ 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68,
+ 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2
+};
+static const u8 enc_output044[] __initconst = {
+ 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6,
+ 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b,
+ 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02,
+ 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4
+};
+static const u8 enc_assoc044[] __initconst = {
+ 0x02
+};
+static const u8 enc_nonce044[] __initconst = {
+ 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21,
+ 0x02, 0xd5, 0x06, 0x56
+};
+static const u8 enc_key044[] __initconst = {
+ 0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32,
+ 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50,
+ 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0,
+ 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c
+};
+
+/* wycheproof - misc */
+static const u8 enc_input045[] __initconst = {
+ 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44,
+ 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8
+};
+static const u8 enc_output045[] __initconst = {
+ 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1,
+ 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60,
+ 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1,
+ 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58
+};
+static const u8 enc_assoc045[] __initconst = {
+ 0xb6, 0x48
+};
+static const u8 enc_nonce045[] __initconst = {
+ 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9,
+ 0x5b, 0x3a, 0xa7, 0x13
+};
+static const u8 enc_key045[] __initconst = {
+ 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41,
+ 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0,
+ 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44,
+ 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc
+};
+
+/* wycheproof - misc */
+static const u8 enc_input046[] __initconst = {
+ 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23,
+ 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47
+};
+static const u8 enc_output046[] __initconst = {
+ 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda,
+ 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1,
+ 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b,
+ 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8
+};
+static const u8 enc_assoc046[] __initconst = {
+ 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd,
+ 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0
+};
+static const u8 enc_nonce046[] __initconst = {
+ 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b,
+ 0xa4, 0x65, 0x96, 0xdf
+};
+static const u8 enc_key046[] __initconst = {
+ 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08,
+ 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96,
+ 0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89,
+ 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input047[] __initconst = {
+ 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf,
+ 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2
+};
+static const u8 enc_output047[] __initconst = {
+ 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb,
+ 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95,
+ 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34,
+ 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f
+};
+static const u8 enc_assoc047[] __initconst = {
+ 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07,
+ 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b
+};
+static const u8 enc_nonce047[] __initconst = {
+ 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6,
+ 0x80, 0x92, 0x66, 0xd9
+};
+static const u8 enc_key047[] __initconst = {
+ 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91,
+ 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23,
+ 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6,
+ 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16
+};
+
+/* wycheproof - misc */
+static const u8 enc_input048[] __initconst = {
+ 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32,
+ 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3
+};
+static const u8 enc_output048[] __initconst = {
+ 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b,
+ 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68,
+ 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84,
+ 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3
+};
+static const u8 enc_assoc048[] __initconst = {
+ 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab,
+ 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c,
+ 0x0e
+};
+static const u8 enc_nonce048[] __initconst = {
+ 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40,
+ 0xfc, 0x10, 0x68, 0xc3
+};
+static const u8 enc_key048[] __initconst = {
+ 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b,
+ 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97,
+ 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b,
+ 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input049[] __initconst = {
+ 0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2,
+ 0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6
+};
+static const u8 enc_output049[] __initconst = {
+ 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87,
+ 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11,
+ 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e,
+ 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6
+};
+static const u8 enc_assoc049[] __initconst = {
+ 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8,
+ 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94,
+ 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5,
+ 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda
+};
+static const u8 enc_nonce049[] __initconst = {
+ 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf,
+ 0xad, 0x14, 0xd5, 0x3e
+};
+static const u8 enc_key049[] __initconst = {
+ 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d,
+ 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc,
+ 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47,
+ 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input050[] __initconst = {
+ 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18,
+ 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c
+};
+static const u8 enc_output050[] __initconst = {
+ 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6,
+ 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca,
+ 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b,
+ 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04
+};
+static const u8 enc_assoc050[] __initconst = {
+ 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22,
+ 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07,
+ 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90,
+ 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37
+};
+static const u8 enc_nonce050[] __initconst = {
+ 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28,
+ 0x6a, 0x7b, 0x76, 0x51
+};
+static const u8 enc_key050[] __initconst = {
+ 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1,
+ 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c,
+ 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a,
+ 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30
+};
+
+/* wycheproof - misc */
+static const u8 enc_input051[] __initconst = {
+ 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59,
+ 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6
+};
+static const u8 enc_output051[] __initconst = {
+ 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70,
+ 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd,
+ 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4,
+ 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43
+};
+static const u8 enc_assoc051[] __initconst = {
+ 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92,
+ 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57,
+ 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75,
+ 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88,
+ 0x73
+};
+static const u8 enc_nonce051[] __initconst = {
+ 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0,
+ 0xa8, 0xfa, 0x89, 0x49
+};
+static const u8 enc_key051[] __initconst = {
+ 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27,
+ 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74,
+ 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c,
+ 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99
+};
+
+/* wycheproof - misc */
+static const u8 enc_input052[] __initconst = {
+ 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3,
+ 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed
+};
+static const u8 enc_output052[] __initconst = {
+ 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc,
+ 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b,
+ 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed,
+ 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32
+};
+static const u8 enc_assoc052[] __initconst = {
+ 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a,
+ 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14,
+ 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae,
+ 0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c,
+ 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92,
+ 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61
+};
+static const u8 enc_nonce052[] __initconst = {
+ 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73,
+ 0x0e, 0xc3, 0x5d, 0x12
+};
+static const u8 enc_key052[] __initconst = {
+ 0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5,
+ 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf,
+ 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c,
+ 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4
+};
+
+/* wycheproof - misc */
+static const u8 enc_input053[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7,
+ 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07
+};
+static const u8 enc_assoc053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key053[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input054[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2,
+ 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce
+};
+static const u8 enc_assoc054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key054[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input055[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa,
+ 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80,
+ 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4,
+ 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7,
+ 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38,
+ 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3,
+ 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e
+};
+static const u8 enc_assoc055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key055[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input056[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27,
+ 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6
+};
+static const u8 enc_assoc056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce056[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key056[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input057[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b,
+ 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68
+};
+static const u8 enc_assoc057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce057[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key057[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input058[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42,
+ 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd,
+ 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7,
+ 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91,
+ 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62
+};
+static const u8 enc_assoc058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce058[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key058[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input059[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e
+};
+static const u8 enc_output059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75,
+ 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4
+};
+static const u8 enc_assoc059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key059[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input060[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d
+};
+static const u8 enc_output060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba,
+ 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a
+};
+static const u8 enc_assoc060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key060[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input061[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d,
+ 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a,
+ 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82,
+ 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00,
+ 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44,
+ 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47,
+ 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d,
+ 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8,
+ 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47
+};
+static const u8 enc_output061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f,
+ 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a
+};
+static const u8 enc_assoc061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key061[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input062[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1
+};
+static const u8 enc_output062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59,
+ 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19
+};
+static const u8 enc_assoc062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce062[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key062[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input063[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2
+};
+static const u8 enc_output063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2,
+ 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c
+};
+static const u8 enc_assoc063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce063[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key063[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input064[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2,
+ 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85,
+ 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d,
+ 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff,
+ 0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb,
+ 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8,
+ 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92,
+ 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47,
+ 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8
+};
+static const u8 enc_output064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5,
+ 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06
+};
+static const u8 enc_assoc064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce064[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key064[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input065[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf,
+ 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67
+};
+static const u8 enc_assoc065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce065[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key065[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input066[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42
+};
+static const u8 enc_output066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89,
+ 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90
+};
+static const u8 enc_assoc066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce066[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key066[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input067[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42,
+ 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05,
+ 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd,
+ 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f,
+ 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b,
+ 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38,
+ 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12,
+ 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7,
+ 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94,
+ 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22
+};
+static const u8 enc_assoc067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce067[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key067[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input068[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9,
+ 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d
+};
+static const u8 enc_assoc068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key068[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input069[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde,
+ 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82
+};
+static const u8 enc_assoc069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key069[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input070[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42,
+ 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05,
+ 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38,
+ 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7,
+ 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3,
+ 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71
+};
+static const u8 enc_assoc070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key070[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input071[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5,
+ 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20
+};
+static const u8 enc_assoc071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce071[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key071[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input072[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e,
+ 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4
+};
+static const u8 enc_assoc072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce072[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key072[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input073[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02,
+ 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80,
+ 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4,
+ 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38,
+ 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51,
+ 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f
+};
+static const u8 enc_assoc073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce073[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key073[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input074[] __initconst = {
+ 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51,
+ 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69,
+ 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f,
+ 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90,
+ 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0,
+ 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac,
+ 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2,
+ 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5,
+ 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b,
+ 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49,
+ 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16,
+ 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58,
+ 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73,
+ 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3,
+ 0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c,
+ 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a
+};
+static const u8 enc_output074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85,
+ 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca
+};
+static const u8 enc_assoc074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x02, 0x50, 0x6e
+};
+static const u8 enc_key074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input075[] __initconst = {
+ 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75,
+ 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56,
+ 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2,
+ 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed,
+ 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f,
+ 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f,
+ 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0,
+ 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8,
+ 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32,
+ 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8,
+ 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b,
+ 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b,
+ 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd,
+ 0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f,
+ 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1,
+ 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94
+};
+static const u8 enc_output075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7,
+ 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5
+};
+static const u8 enc_assoc075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x03, 0x18, 0xa5
+};
+static const u8 enc_key075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input076[] __initconst = {
+ 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85,
+ 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02,
+ 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09,
+ 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8,
+ 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd,
+ 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85,
+ 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39,
+ 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae,
+ 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed,
+ 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4,
+ 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c,
+ 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33,
+ 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19,
+ 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11,
+ 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9,
+ 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5
+};
+static const u8 enc_output076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d,
+ 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63
+};
+static const u8 enc_assoc076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce076[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0
+};
+static const u8 enc_key076[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input077[] __initconst = {
+ 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae,
+ 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16,
+ 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7,
+ 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61,
+ 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2,
+ 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf,
+ 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf,
+ 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48,
+ 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8,
+ 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa,
+ 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87,
+ 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32,
+ 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1,
+ 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b,
+ 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c,
+ 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f
+};
+static const u8 enc_output077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4,
+ 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa
+};
+static const u8 enc_assoc077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce077[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66
+};
+static const u8 enc_key077[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input078[] __initconst = {
+ 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef,
+ 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5,
+ 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c,
+ 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d,
+ 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf,
+ 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6,
+ 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe,
+ 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5,
+ 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62,
+ 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c,
+ 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f,
+ 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2,
+ 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33,
+ 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e,
+ 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9,
+ 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62
+};
+static const u8 enc_output078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7,
+ 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d
+};
+static const u8 enc_assoc078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce078[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90
+};
+static const u8 enc_key078[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input079[] __initconst = {
+ 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9,
+ 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6,
+ 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c,
+ 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82,
+ 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21,
+ 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12,
+ 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c,
+ 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d,
+ 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32,
+ 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84,
+ 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3,
+ 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24,
+ 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94,
+ 0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53,
+ 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71,
+ 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd
+};
+static const u8 enc_output079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2,
+ 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e
+};
+static const u8 enc_assoc079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce079[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a
+};
+static const u8 enc_key079[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input080[] __initconst = {
+ 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5,
+ 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9,
+ 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b,
+ 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc,
+ 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38,
+ 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27,
+ 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83,
+ 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b,
+ 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda,
+ 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4,
+ 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc,
+ 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b,
+ 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4,
+ 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00,
+ 0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d,
+ 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1
+};
+static const u8 enc_output080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a,
+ 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02
+};
+static const u8 enc_assoc080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce080[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40
+};
+static const u8 enc_key080[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input081[] __initconst = {
+ 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92,
+ 0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c,
+ 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9,
+ 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06,
+ 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a,
+ 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa,
+ 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07,
+ 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6,
+ 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06,
+ 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8,
+ 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5,
+ 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5,
+ 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f,
+ 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86,
+ 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23,
+ 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1
+};
+static const u8 enc_output081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba,
+ 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0
+};
+static const u8 enc_assoc081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce081[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35
+};
+static const u8 enc_key081[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input082[] __initconst = {
+ 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb,
+ 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49,
+ 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16,
+ 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d,
+ 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9,
+ 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e,
+ 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d,
+ 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc,
+ 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6,
+ 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca,
+ 0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83,
+ 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08,
+ 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95,
+ 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66,
+ 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83,
+ 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07
+};
+static const u8 enc_output082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42,
+ 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae
+};
+static const u8 enc_assoc082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce082[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5
+};
+static const u8 enc_key082[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input083[] __initconst = {
+ 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58,
+ 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84,
+ 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5,
+ 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf,
+ 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39,
+ 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03,
+ 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f,
+ 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa,
+ 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08,
+ 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59,
+ 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56,
+ 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9,
+ 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43,
+ 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa,
+ 0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba,
+ 0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a
+};
+static const u8 enc_output083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b,
+ 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19
+};
+static const u8 enc_assoc083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce083[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4
+};
+static const u8 enc_key083[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input084[] __initconst = {
+ 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18,
+ 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b,
+ 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99,
+ 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c,
+ 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde,
+ 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2,
+ 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1,
+ 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8,
+ 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b,
+ 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f,
+ 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77,
+ 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8,
+ 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c,
+ 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf,
+ 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97,
+ 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee
+};
+static const u8 enc_output084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab,
+ 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87
+};
+static const u8 enc_assoc084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce084[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8
+};
+static const u8 enc_key084[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input085[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6,
+ 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed,
+ 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7,
+ 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37,
+ 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a,
+ 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b
+};
+static const u8 enc_output085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68,
+ 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43
+};
+static const u8 enc_assoc085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce085[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key085[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input086[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output086[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const u8 enc_assoc086[] __initconst = {
+ 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1,
+ 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00
+};
+static const u8 enc_nonce086[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key086[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input087[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f,
+ 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58
+};
+static const u8 enc_nonce087[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key087[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input088[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output088[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_assoc088[] __initconst = {
+ 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f,
+ 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00
+};
+static const u8 enc_nonce088[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key088[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input089[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output089[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_assoc089[] __initconst = {
+ 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae,
+ 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02
+};
+static const u8 enc_nonce089[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key089[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input090[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_assoc090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe,
+ 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3
+};
+static const u8 enc_nonce090[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key090[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input091[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output091[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc091[] __initconst = {
+ 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30,
+ 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01
+};
+static const u8 enc_nonce091[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key091[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input092[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output092[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc092[] __initconst = {
+ 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11,
+ 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01
+};
+static const u8 enc_nonce092[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key092[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input093[] __initconst = {
+ 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d,
+ 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44,
+ 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3,
+ 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b,
+ 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb,
+ 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c
+};
+static const u8 enc_output093[] __initconst = {
+ 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14,
+ 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77,
+ 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a
+};
+static const u8 enc_assoc093[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce093[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key093[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input094[] __initconst = {
+ 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90,
+ 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45,
+ 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef,
+ 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09,
+ 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20,
+ 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10
+};
+static const u8 enc_output094[] __initconst = {
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66,
+ 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6,
+ 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc
+};
+static const u8 enc_assoc094[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce094[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key094[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input095[] __initconst = {
+ 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b,
+ 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46,
+ 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6,
+ 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a,
+ 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79,
+ 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13
+};
+static const u8 enc_output095[] __initconst = {
+ 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad,
+ 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02,
+ 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0
+};
+static const u8 enc_assoc095[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce095[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key095[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input096[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60,
+ 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c,
+ 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67,
+ 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94,
+ 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94,
+ 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04
+};
+static const u8 enc_output096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96,
+ 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73,
+ 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8
+};
+static const u8 enc_assoc096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce096[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key096[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input097[] __initconst = {
+ 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6,
+ 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01,
+ 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce,
+ 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f
+};
+static const u8 enc_output097[] __initconst = {
+ 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00,
+ 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b,
+ 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26
+};
+static const u8 enc_assoc097[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce097[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key097[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input098[] __initconst = {
+ 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62,
+ 0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0,
+ 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f,
+ 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1
+};
+static const u8 enc_output098[] __initconst = {
+ 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94,
+ 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e,
+ 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b
+};
+static const u8 enc_assoc098[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce098[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key098[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input099[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12,
+ 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98,
+ 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95,
+ 0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48,
+ 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66,
+ 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8
+};
+static const u8 enc_output099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4,
+ 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56,
+ 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1
+};
+static const u8 enc_assoc099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce099[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key099[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input100[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70,
+ 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd,
+ 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77,
+ 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75,
+ 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84,
+ 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5
+};
+static const u8 enc_output100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86,
+ 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59,
+ 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6
+};
+static const u8 enc_assoc100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce100[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key100[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input101[] __initconst = {
+ 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c,
+ 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde,
+ 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92,
+ 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f,
+ 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea,
+ 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88
+};
+static const u8 enc_output101[] __initconst = {
+ 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5,
+ 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65,
+ 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05
+};
+static const u8 enc_assoc101[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce101[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key101[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input102[] __initconst = {
+ 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0,
+ 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d,
+ 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73,
+ 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b,
+ 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b,
+ 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c
+};
+static const u8 enc_output102[] __initconst = {
+ 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39,
+ 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56,
+ 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04
+};
+static const u8 enc_assoc102[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce102[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key102[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input103[] __initconst = {
+ 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d,
+ 0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce,
+ 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c,
+ 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a,
+ 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14,
+ 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d
+};
+static const u8 enc_output103[] __initconst = {
+ 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94,
+ 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a,
+ 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08
+};
+static const u8 enc_assoc103[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce103[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key103[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input104[] __initconst = {
+ 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac,
+ 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b,
+ 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9,
+ 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98,
+ 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1,
+ 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f
+};
+static const u8 enc_output104[] __initconst = {
+ 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35,
+ 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d,
+ 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d
+};
+static const u8 enc_assoc104[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce104[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key104[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input105[] __initconst = {
+ 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5,
+ 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99,
+ 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e,
+ 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a,
+ 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46,
+ 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d
+};
+static const u8 enc_output105[] __initconst = {
+ 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c,
+ 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88,
+ 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd
+};
+static const u8 enc_assoc105[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce105[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key105[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input106[] __initconst = {
+ 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06,
+ 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5,
+ 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68,
+ 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a,
+ 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10,
+ 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d
+};
+static const u8 enc_output106[] __initconst = {
+ 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f,
+ 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88,
+ 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a
+};
+static const u8 enc_assoc106[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce106[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key106[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input107[] __initconst = {
+ 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71,
+ 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44,
+ 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c,
+ 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a,
+ 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3,
+ 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13
+};
+static const u8 enc_output107[] __initconst = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87,
+ 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02,
+ 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88
+};
+static const u8 enc_assoc107[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce107[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key107[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input108[] __initconst = {
+ 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43,
+ 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6,
+ 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11,
+ 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99,
+ 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69,
+ 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e
+};
+static const u8 enc_output108[] __initconst = {
+ 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda,
+ 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96,
+ 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a
+};
+static const u8 enc_assoc108[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce108[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key108[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input109[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a,
+ 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb,
+ 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96,
+ 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f,
+ 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59,
+ 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16
+};
+static const u8 enc_output109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac,
+ 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1,
+ 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb
+};
+static const u8 enc_assoc109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce109[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key109[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input110[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5,
+ 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9,
+ 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b,
+ 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b,
+ 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4,
+ 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12
+};
+static const u8 enc_output110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43,
+ 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6,
+ 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe
+};
+static const u8 enc_assoc110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce110[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key110[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input111[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda,
+ 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55,
+ 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5,
+ 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f,
+ 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a,
+ 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16
+};
+static const u8 enc_output111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c,
+ 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4,
+ 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9
+};
+static const u8 enc_assoc111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce111[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key111[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input112[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38,
+ 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65,
+ 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa,
+ 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13
+};
+static const u8 enc_output112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce,
+ 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7,
+ 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce
+};
+static const u8 enc_assoc112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce112[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key112[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input113[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86,
+ 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1,
+ 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e,
+ 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16
+};
+static const u8 enc_output113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70,
+ 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa,
+ 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3
+};
+static const u8 enc_assoc113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce113[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key113[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input114[] __initconst = {
+ 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73,
+ 0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc,
+ 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a,
+ 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f,
+ 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2,
+ 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88
+};
+static const u8 enc_output114[] __initconst = {
+ 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea,
+ 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69,
+ 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1
+};
+static const u8 enc_assoc114[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce114[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key114[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input115[] __initconst = {
+ 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb,
+ 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68,
+ 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33,
+ 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98,
+ 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b,
+ 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f
+};
+static const u8 enc_output115[] __initconst = {
+ 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72,
+ 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74,
+ 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05
+};
+static const u8 enc_assoc115[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce115[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key115[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input116[] __initconst = {
+ 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd,
+ 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea,
+ 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25,
+ 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13
+};
+static const u8 enc_output116[] __initconst = {
+ 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b,
+ 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50,
+ 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2
+};
+static const u8 enc_assoc116[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce116[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key116[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input117[] __initconst = {
+ 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d,
+ 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06,
+ 0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9,
+ 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11
+};
+static const u8 enc_output117[] __initconst = {
+ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb,
+ 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53,
+ 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7
+};
+static const u8 enc_assoc117[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce117[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key117[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input118[] __initconst = {
+ 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03,
+ 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43,
+ 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a,
+ 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f,
+ 0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85,
+ 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16
+};
+static const u8 enc_output118[] __initconst = {
+ 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5,
+ 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0,
+ 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1
+};
+static const u8 enc_assoc118[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce118[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key118[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+static const struct chacha20poly1305_testvec
+chacha20poly1305_enc_vectors[] __initconst = {
+ { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001,
+ sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) },
+ { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002,
+ sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) },
+ { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003,
+ sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) },
+ { enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004,
+ sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) },
+ { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005,
+ sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) },
+ { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006,
+ sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) },
+ { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007,
+ sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) },
+ { enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008,
+ sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) },
+ { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009,
+ sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) },
+ { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010,
+ sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) },
+ { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011,
+ sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) },
+ { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012,
+ sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) },
+ { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013,
+ sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) },
+ { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014,
+ sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) },
+ { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015,
+ sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) },
+ { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016,
+ sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) },
+ { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017,
+ sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) },
+ { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018,
+ sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) },
+ { enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019,
+ sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) },
+ { enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020,
+ sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) },
+ { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021,
+ sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) },
+ { enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022,
+ sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) },
+ { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023,
+ sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) },
+ { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024,
+ sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) },
+ { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025,
+ sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) },
+ { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026,
+ sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) },
+ { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027,
+ sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) },
+ { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028,
+ sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) },
+ { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029,
+ sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) },
+ { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030,
+ sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) },
+ { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031,
+ sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) },
+ { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032,
+ sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) },
+ { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033,
+ sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) },
+ { enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034,
+ sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) },
+ { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035,
+ sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) },
+ { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036,
+ sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) },
+ { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037,
+ sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) },
+ { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038,
+ sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) },
+ { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039,
+ sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) },
+ { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040,
+ sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) },
+ { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041,
+ sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) },
+ { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042,
+ sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) },
+ { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043,
+ sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) },
+ { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044,
+ sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) },
+ { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045,
+ sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) },
+ { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046,
+ sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) },
+ { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047,
+ sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) },
+ { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048,
+ sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) },
+ { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049,
+ sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) },
+ { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050,
+ sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) },
+ { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051,
+ sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) },
+ { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052,
+ sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) },
+ { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053,
+ sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) },
+ { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054,
+ sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) },
+ { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055,
+ sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) },
+ { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056,
+ sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) },
+ { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057,
+ sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) },
+ { enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058,
+ sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) },
+ { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059,
+ sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) },
+ { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060,
+ sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) },
+ { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061,
+ sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) },
+ { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062,
+ sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) },
+ { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063,
+ sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) },
+ { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064,
+ sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) },
+ { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065,
+ sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) },
+ { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066,
+ sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) },
+ { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067,
+ sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) },
+ { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068,
+ sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) },
+ { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069,
+ sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) },
+ { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070,
+ sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) },
+ { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071,
+ sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) },
+ { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072,
+ sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) },
+ { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073,
+ sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) },
+ { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074,
+ sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) },
+ { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075,
+ sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) },
+ { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076,
+ sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) },
+ { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077,
+ sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) },
+ { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078,
+ sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) },
+ { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079,
+ sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) },
+ { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080,
+ sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) },
+ { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081,
+ sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) },
+ { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082,
+ sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) },
+ { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083,
+ sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) },
+ { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084,
+ sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) },
+ { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085,
+ sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) },
+ { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086,
+ sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) },
+ { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087,
+ sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) },
+ { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088,
+ sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) },
+ { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089,
+ sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) },
+ { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090,
+ sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) },
+ { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091,
+ sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) },
+ { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092,
+ sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) },
+ { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093,
+ sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) },
+ { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094,
+ sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) },
+ { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095,
+ sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) },
+ { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096,
+ sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) },
+ { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097,
+ sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) },
+ { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098,
+ sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) },
+ { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099,
+ sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) },
+ { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100,
+ sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) },
+ { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101,
+ sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) },
+ { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102,
+ sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) },
+ { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103,
+ sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) },
+ { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104,
+ sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) },
+ { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105,
+ sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) },
+ { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106,
+ sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) },
+ { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107,
+ sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) },
+ { enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108,
+ sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) },
+ { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109,
+ sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) },
+ { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110,
+ sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) },
+ { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111,
+ sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) },
+ { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112,
+ sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) },
+ { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113,
+ sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) },
+ { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114,
+ sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) },
+ { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115,
+ sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) },
+ { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116,
+ sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) },
+ { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117,
+ sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) },
+ { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118,
+ sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) }
+};
+
+static const u8 dec_input001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 dec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 dec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 dec_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 dec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const u8 dec_input002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
+static const u8 dec_output002[] __initconst = { };
+static const u8 dec_assoc002[] __initconst = { };
+static const u8 dec_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 dec_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 dec_input003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 dec_output003[] __initconst = { };
+static const u8 dec_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 dec_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 dec_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 dec_input004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 dec_output004[] __initconst = {
+ 0xa4
+};
+static const u8 dec_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 dec_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 dec_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 dec_input005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 dec_output005[] __initconst = {
+ 0x2d
+};
+static const u8 dec_assoc005[] __initconst = { };
+static const u8 dec_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 dec_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 dec_input006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 dec_output006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 dec_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 dec_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 dec_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 dec_input007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 dec_output007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 dec_assoc007[] __initconst = { };
+static const u8 dec_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 dec_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 dec_input008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 dec_output008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 dec_assoc008[] __initconst = { };
+static const u8 dec_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 dec_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 dec_input009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 dec_output009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 dec_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 dec_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 dec_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 dec_input010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 dec_output010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 dec_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 dec_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 dec_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 dec_input011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 dec_output011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 dec_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 dec_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 dec_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 dec_input012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 dec_output012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const u8 dec_input013[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd7
+};
+static const u8 dec_output013[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc013[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce013[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key013[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const struct chacha20poly1305_testvec
+chacha20poly1305_dec_vectors[] __initconst = {
+ { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001,
+ sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) },
+ { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002,
+ sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) },
+ { dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003,
+ sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) },
+ { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004,
+ sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) },
+ { dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005,
+ sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) },
+ { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006,
+ sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) },
+ { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007,
+ sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) },
+ { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008,
+ sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) },
+ { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009,
+ sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) },
+ { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010,
+ sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) },
+ { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011,
+ sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) },
+ { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012,
+ sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) },
+ { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013,
+ sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013),
+ true }
+};
+
+static const u8 xenc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xenc_output001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xenc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xenc_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xenc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_enc_vectors[] __initconst = {
+ { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001,
+ sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) }
+};
+
+static const u8 xdec_input001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xdec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xdec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xdec_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xdec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_dec_vectors[] __initconst = {
+ { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001,
+ sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) }
+};
+
+/* This is for the selftests only, since it is only useful for testing the
+ * underlying primitives and their interactions.
+ */
+static void __init
+chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[12],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha20_state[CHACHA_STATE_WORDS];
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b = {{ 0 }};
+ u8 bottom_row[16] = { 0 };
+ u32 le_key[8];
+ int i;
+
+ memcpy(&bottom_row[4], nonce, 12);
+ for (i = 0; i < 8; ++i)
+ le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i);
+ chacha_init(chacha20_state, le_key, bottom_row);
+ chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+ poly1305_update(&poly1305_state, ad, ad_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);
+ chacha20_crypt(chacha20_state, dst, src, src_len);
+ poly1305_update(&poly1305_state, dst, src_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+ poly1305_final(&poly1305_state, dst + src_len);
+}
+
+static void __init
+chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 *nonce, const size_t nonce_len,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (nonce_len == 8)
+ chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
+ get_unaligned_le64(nonce), key);
+ else if (nonce_len == 12)
+ chacha20poly1305_encrypt_bignonce(dst, src, src_len, ad,
+ ad_len, nonce, key);
+ else
+ BUG();
+}
+
+static bool __init
+decryption_success(bool func_ret, bool expect_failure, int memcmp_result)
+{
+ if (expect_failure)
+ return !func_ret;
+ return func_ret && !memcmp_result;
+}
+
+bool __init chacha20poly1305_selftest(void)
+{
+ enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 };
+ size_t i, j, k, total_len;
+ u8 *computed_output = NULL, *input = NULL;
+ bool success = true, ret;
+ struct scatterlist sg_src[3];
+
+ computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ if (!computed_output || !input) {
+ pr_err("chacha20poly1305 self-test malloc: FAIL\n");
+ success = false;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ chacha20poly1305_selftest_encrypt(computed_output,
+ chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ chacha20poly1305_enc_vectors[i].nonce,
+ chacha20poly1305_enc_vectors[i].nlen,
+ chacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ if (chacha20poly1305_enc_vectors[i].nlen != 8)
+ continue;
+ memcpy(computed_output, chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen);
+ sg_init_one(sg_src, computed_output,
+ chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE);
+ ret = chacha20poly1305_encrypt_sg_inplace(sg_src,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce),
+ chacha20poly1305_enc_vectors[i].key);
+ if (!ret || memcmp(computed_output,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = chacha20poly1305_decrypt(computed_output,
+ chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memcpy(computed_output, chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen);
+ sg_init_one(sg_src, computed_output,
+ chacha20poly1305_dec_vectors[i].ilen);
+ ret = chacha20poly1305_decrypt_sg_inplace(sg_src,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output, chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ xchacha20poly1305_encrypt(computed_output,
+ xchacha20poly1305_enc_vectors[i].input,
+ xchacha20poly1305_enc_vectors[i].ilen,
+ xchacha20poly1305_enc_vectors[i].assoc,
+ xchacha20poly1305_enc_vectors[i].alen,
+ xchacha20poly1305_enc_vectors[i].nonce,
+ xchacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ xchacha20poly1305_enc_vectors[i].output,
+ xchacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = xchacha20poly1305_decrypt(computed_output,
+ xchacha20poly1305_dec_vectors[i].input,
+ xchacha20poly1305_dec_vectors[i].ilen,
+ xchacha20poly1305_dec_vectors[i].assoc,
+ xchacha20poly1305_dec_vectors[i].alen,
+ xchacha20poly1305_dec_vectors[i].nonce,
+ xchacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ xchacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ xchacha20poly1305_dec_vectors[i].output,
+ xchacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
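+ /* Exhaustively split a zeroed buffer across up to three scatterlist
+ * entries and check that the sg-based in-place helpers agree with the
+ * linear-buffer API in both directions. This is expensive, so it only
+ * runs when DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST is enabled.
+ */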
+ for (total_len = POLY1305_DIGEST_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST)
+ && total_len <= 1 << 10; ++total_len) {
+ for (i = 0; i <= total_len; ++i) {
+ for (j = i; j <= total_len; ++j) {
+ k = 0;
+ sg_init_table(sg_src, 3);
+ if (i)
+ sg_set_buf(&sg_src[k++], input, i);
+ if (j - i)
+ sg_set_buf(&sg_src[k++], input + i, j - i);
+ if (total_len - j)
+ sg_set_buf(&sg_src[k++], input + j, total_len - j);
+ sg_init_marker(sg_src, k);
+ memset(computed_output, 0, total_len);
+ memset(input, 0, total_len);
+
+ if (!chacha20poly1305_encrypt_sg_inplace(sg_src,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0,
+ 0, enc_key001))
+ goto chunkfail;
+ chacha20poly1305_encrypt(computed_output,
+ computed_output,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0, 0,
+ enc_key001);
+ if (memcmp(computed_output, input, total_len))
+ goto chunkfail;
+ if (!chacha20poly1305_decrypt(computed_output,
+ input, total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (computed_output[k])
+ goto chunkfail;
+ }
+ if (!chacha20poly1305_decrypt_sg_inplace(sg_src,
+ total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (input[k])
+ goto chunkfail;
+ }
+ continue;
+
+ chunkfail:
+ pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n",
+ total_len, i, j);
+ success = false;
+ }
+
+ }
+ }
+
+out:
+ kfree(computed_output);
+ kfree(input);
+ return success;
+}
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
new file mode 100644
index 000000000000..fa6a9440fc95
--- /dev/null
+++ b/lib/crypto/chacha20poly1305.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the ChaCha20Poly1305 AEAD construction.
+ *
+ * Information: https://tools.ietf.org/html/rfc8439
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
+#include <crypto/scatterwalk.h>
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
+
+static void chacha_load_key(u32 *k, const u8 *in)
+{
+ k[0] = get_unaligned_le32(in);
+ k[1] = get_unaligned_le32(in + 4);
+ k[2] = get_unaligned_le32(in + 8);
+ k[3] = get_unaligned_le32(in + 12);
+ k[4] = get_unaligned_le32(in + 16);
+ k[5] = get_unaligned_le32(in + 20);
+ k[6] = get_unaligned_le32(in + 24);
+ k[7] = get_unaligned_le32(in + 28);
+}
+
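+/* XChaCha nonce extension: derive a sub-key with HChaCha20 from the key and
+ * the first 128 bits of the 192-bit nonce, then run ordinary ChaCha20 with
+ * that sub-key and the remaining 64 nonce bits (the low 64 IV bits are zero).
+ */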
+static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
+{
+ u32 k[CHACHA_KEY_WORDS];
+ u8 iv[CHACHA_IV_SIZE];
+
+ memset(iv, 0, 8);
+ memcpy(iv + 8, nonce + 16, 8);
+
+ chacha_load_key(k, key);
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(chacha_state, k, nonce);
+ hchacha_block(chacha_state, k, 20);
+
+ chacha_init(chacha_state, k, iv);
+
+ memzero_explicit(k, sizeof(k));
+ memzero_explicit(iv, sizeof(iv));
+}
+
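+/* RFC 8439 construction: the Poly1305 one-time key is taken from the first
+ * 32 bytes of ChaCha20 keystream (block counter 0), the ciphertext is then
+ * produced from counter 1 onward, and the tag is computed over
+ * ad || pad16(ad) || ciphertext || pad16(ciphertext) || le64(ad_len) ||
+ * le64(ct_len).
+ */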
+static void
+__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b;
+
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ chacha20_crypt(chacha_state, dst, src, src_len);
+
+ poly1305_update(&poly1305_state, dst, src_len);
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, dst + src_len);
+
+ memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32));
+ memzero_explicit(&b, sizeof(b));
+}
+
+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+
+ chacha_load_key(k, key);
+
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt);
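+
+/* A minimal usage sketch (hypothetical caller; MSG_LEN, msg, ct, ad, nonce
+ * and key are placeholder names). The destination must leave room for the
+ * 16-byte tag appended after the ciphertext:
+ *
+ *   u8 ct[MSG_LEN + POLY1305_DIGEST_SIZE];
+ *
+ *   chacha20poly1305_encrypt(ct, msg, MSG_LEN, ad, ad_len, nonce, key);
+ *   if (!chacha20poly1305_decrypt(msg, ct, sizeof(ct), ad, ad_len, nonce, key))
+ *           return -EBADMSG; (tag did not verify)
+ */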
+
+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_encrypt);
+
+static bool
+__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ size_t dst_len;
+ int ret;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 mac[POLY1305_DIGEST_SIZE];
+ __le64 lens[2];
+ } b;
+
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ dst_len = src_len - POLY1305_DIGEST_SIZE;
+ poly1305_update(&poly1305_state, src, dst_len);
+ if (dst_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (dst_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(dst_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, b.mac);
+
+ ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE);
+ if (likely(!ret))
+ chacha20_crypt(chacha_state, dst, src, dst_len);
+
+ memzero_explicit(&b, sizeof(b));
+
+ return !ret;
+}
+
+bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ bool ret;
+
+ chacha_load_key(k, key);
+
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+ return ret;
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt);
+
+bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_decrypt);
+
+static
+bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
+ const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE],
+ int encrypt)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ struct sg_mapping_iter miter;
+ size_t partial = 0;
+ unsigned int flags;
+ bool ret = true;
+ int sl;
+ union {
+ struct {
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ };
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 chacha_stream[CHACHA_BLOCK_SIZE];
+ struct {
+ u8 mac[2][POLY1305_DIGEST_SIZE];
+ };
+ __le64 lens[2];
+ } b __aligned(16);
+
+ if (WARN_ON(src_len > INT_MAX))
+ return false;
+
+ chacha_load_key(b.k, key);
+
+ b.iv[0] = 0;
+ b.iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, b.k, (u8 *)b.iv);
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ if (unlikely(ad_len)) {
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+ }
+
+ flags = SG_MITER_TO_SG | SG_MITER_ATOMIC;
+
+ sg_miter_start(&miter, src, sg_nents(src), flags);
+
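+ /* Walk the source scatterlist, carrying at most one partial ChaCha20
+ * keystream block in b.chacha_stream across entry boundaries: leftover
+ * keystream is XORed in first, full blocks (and the final tail of the
+ * message) go through chacha20_crypt() in place, and any other short
+ * tail buffers fresh keystream for the next entry. Poly1305 is fed the
+ * ciphertext: before the XOR when decrypting, after it when encrypting.
+ */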
+ for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) {
+ u8 *addr = miter.addr;
+ size_t length = min_t(size_t, sl, miter.length);
+
+ if (!encrypt)
+ poly1305_update(&poly1305_state, addr, length);
+
+ if (unlikely(partial)) {
+ size_t l = min(length, CHACHA_BLOCK_SIZE - partial);
+
+ crypto_xor(addr, b.chacha_stream + partial, l);
+ partial = (partial + l) & (CHACHA_BLOCK_SIZE - 1);
+
+ addr += l;
+ length -= l;
+ }
+
+ if (likely(length >= CHACHA_BLOCK_SIZE || length == sl)) {
+ size_t l = length;
+
+ if (unlikely(length < sl))
+ l &= ~(CHACHA_BLOCK_SIZE - 1);
+ chacha20_crypt(chacha_state, addr, addr, l);
+ addr += l;
+ length -= l;
+ }
+
+ if (unlikely(length > 0)) {
+ chacha20_crypt(chacha_state, b.chacha_stream, pad0,
+ CHACHA_BLOCK_SIZE);
+ crypto_xor(addr, b.chacha_stream, length);
+ partial = length;
+ }
+
+ if (encrypt)
+ poly1305_update(&poly1305_state, miter.addr,
+ min_t(size_t, sl, miter.length));
+ }
+
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ if (likely(sl <= -POLY1305_DIGEST_SIZE)) {
+ if (encrypt) {
+ poly1305_final(&poly1305_state,
+ miter.addr + miter.length + sl);
+ ret = true;
+ } else {
+ poly1305_final(&poly1305_state, b.mac[0]);
+ ret = !crypto_memneq(b.mac[0],
+ miter.addr + miter.length + sl,
+ POLY1305_DIGEST_SIZE);
+ }
+ }
+
+ sg_miter_stop(&miter);
+
+ if (unlikely(sl > -POLY1305_DIGEST_SIZE)) {
+ poly1305_final(&poly1305_state, b.mac[1]);
+ scatterwalk_map_and_copy(b.mac[encrypt], src, src_len,
+ sizeof(b.mac[1]), encrypt);
+ ret = encrypt ||
+ !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE);
+ }
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(&b, sizeof(b));
+
+ return ret;
+}
+
+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ return chacha20poly1305_crypt_sg_inplace(src, src_len, ad, ad_len,
+ nonce, key, 1);
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace);
+
+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ return chacha20poly1305_crypt_sg_inplace(src,
+ src_len - POLY1305_DIGEST_SIZE,
+ ad, ad_len, nonce, key, 0);
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
+
+static int __init chacha20poly1305_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!chacha20poly1305_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit chacha20poly1305_exit(void)
+{
+}
+
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519-fiat32.c b/lib/crypto/curve25519-fiat32.c
new file mode 100644
index 000000000000..2fde0ec33dbd
--- /dev/null
+++ b/lib/crypto/curve25519-fiat32.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2016 The fiat-crypto Authors.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally
+ * machine generated, it has been tweaked to be suitable for use in the kernel.
+ * It is optimized for 32-bit machines and machines that cannot work efficiently
+ * with 128-bit integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+/* fe means field element. Here the field is \Z/(2^255-19). An element t,
+ * with entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+ * t[3]+2^102 t[4]+...+2^230 t[9].
+ * fe limbs are bounded by 1.125*2^26, 1.125*2^25, 1.125*2^26, 1.125*2^25, etc.
+ * Multiplication and carrying produce fe from fe_loose.
+ */
+typedef struct fe { u32 v[10]; } fe;
+
+/* fe_loose limbs are bounded by 3.375*2^26, 3.375*2^25, 3.375*2^26, 3.375*2^25, etc.
+ * Addition and subtraction produce fe_loose from (fe, fe).
+ */
+typedef struct fe_loose { u32 v[10]; } fe_loose;
+
+static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
+{
+ /* Ignores top bit of s. */
+ u32 a0 = get_unaligned_le32(s);
+ u32 a1 = get_unaligned_le32(s+4);
+ u32 a2 = get_unaligned_le32(s+8);
+ u32 a3 = get_unaligned_le32(s+12);
+ u32 a4 = get_unaligned_le32(s+16);
+ u32 a5 = get_unaligned_le32(s+20);
+ u32 a6 = get_unaligned_le32(s+24);
+ u32 a7 = get_unaligned_le32(s+28);
+ h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 26 */
+ h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */
+ h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */
+ h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */
+ h[4] = (a3>> 6); /* (32- 6) = 26 */
+ h[5] = a4&((1<<25)-1); /* 25 */
+ h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */
+ h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */
+ h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */
+ h[9] = (a7>> 6)&((1<<25)-1); /* 25 */
+}
+
+static __always_inline void fe_frombytes(fe *h, const u8 *s)
+{
+ fe_frombytes_impl(h->v, s);
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of carry
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 25) - 1);
+ return (x >> 25) & 1;
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of carry
+ * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 26) - 1);
+ return (x >> 26) & 1;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of borrow
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 25) - 1);
+ return x >> 31;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of borrow
+ * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 26) - 1);
+ return x >> 31;
+}
+
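+/* Branchless select: returns nz if t is nonzero, z otherwise. */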
+static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)
+{
+ t = -!!t; /* all set if nonzero, 0 if 0 */
+ return (t&nz) | ((~t)&z);
+}
+
+static __always_inline void fe_freeze(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
+ { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
+ { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
+ { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
+ { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
+ { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
+ { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
+ { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
+ { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
+ { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
+ { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff);
+ { u32 x50 = (x49 & 0x3ffffed);
+ { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
+ { u32 x54 = (x49 & 0x1ffffff);
+ { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
+ { u32 x58 = (x49 & 0x3ffffff);
+ { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
+ { u32 x62 = (x49 & 0x1ffffff);
+ { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
+ { u32 x66 = (x49 & 0x3ffffff);
+ { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
+ { u32 x70 = (x49 & 0x1ffffff);
+ { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
+ { u32 x74 = (x49 & 0x3ffffff);
+ { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
+ { u32 x78 = (x49 & 0x1ffffff);
+ { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
+ { u32 x82 = (x49 & 0x3ffffff);
+ { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
+ { u32 x86 = (x49 & 0x1ffffff);
+ { u32 x88; addcarryx_u25(x85, x47, x86, &x88);
+ out[0] = x52;
+ out[1] = x56;
+ out[2] = x60;
+ out[3] = x64;
+ out[4] = x68;
+ out[5] = x72;
+ out[6] = x76;
+ out[7] = x80;
+ out[8] = x84;
+ out[9] = x88;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_tobytes(u8 s[32], const fe *f)
+{
+ u32 h[10];
+ fe_freeze(h, f->v);
+ s[0] = h[0] >> 0;
+ s[1] = h[0] >> 8;
+ s[2] = h[0] >> 16;
+ s[3] = (h[0] >> 24) | (h[1] << 2);
+ s[4] = h[1] >> 6;
+ s[5] = h[1] >> 14;
+ s[6] = (h[1] >> 22) | (h[2] << 3);
+ s[7] = h[2] >> 5;
+ s[8] = h[2] >> 13;
+ s[9] = (h[2] >> 21) | (h[3] << 5);
+ s[10] = h[3] >> 3;
+ s[11] = h[3] >> 11;
+ s[12] = (h[3] >> 19) | (h[4] << 6);
+ s[13] = h[4] >> 2;
+ s[14] = h[4] >> 10;
+ s[15] = h[4] >> 18;
+ s[16] = h[5] >> 0;
+ s[17] = h[5] >> 8;
+ s[18] = h[5] >> 16;
+ s[19] = (h[5] >> 24) | (h[6] << 1);
+ s[20] = h[6] >> 7;
+ s[21] = h[6] >> 15;
+ s[22] = (h[6] >> 23) | (h[7] << 3);
+ s[23] = h[7] >> 5;
+ s[24] = h[7] >> 13;
+ s[25] = (h[7] >> 21) | (h[8] << 4);
+ s[26] = h[8] >> 4;
+ s[27] = h[8] >> 12;
+ s[28] = (h[8] >> 20) | (h[9] << 6);
+ s[29] = h[9] >> 2;
+ s[30] = h[9] >> 10;
+ s[31] = h[9] >> 18;
+}
+
+/* h = f */
+static __always_inline void fe_copy(fe *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+/* h = 0 */
+static __always_inline void fe_0(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+}
+
+/* h = 1 */
+static __always_inline void fe_1(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+ h->v[0] = 1;
+}
+
+static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = (x5 + x23);
+ out[1] = (x7 + x25);
+ out[2] = (x9 + x27);
+ out[3] = (x11 + x29);
+ out[4] = (x13 + x31);
+ out[5] = (x15 + x33);
+ out[6] = (x17 + x35);
+ out[7] = (x19 + x37);
+ out[8] = (x21 + x39);
+ out[9] = (x20 + x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f + g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_add_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = ((0x7ffffda + x5) - x23);
+ out[1] = ((0x3fffffe + x7) - x25);
+ out[2] = ((0x7fffffe + x9) - x27);
+ out[3] = ((0x3fffffe + x11) - x29);
+ out[4] = ((0x7fffffe + x13) - x31);
+ out[5] = ((0x3fffffe + x15) - x33);
+ out[6] = ((0x7fffffe + x17) - x35);
+ out[7] = ((0x3fffffe + x19) - x37);
+ out[8] = ((0x7fffffe + x21) - x39);
+ out[9] = ((0x3fffffe + x20) - x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f - g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_sub_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void
+fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_sqr_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u64 x19 = ((u64)x2 * x2);
+ { u64 x20 = ((u64)(0x2 * x2) * x4);
+ { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6)));
+ { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8)));
+ { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10));
+ { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12)));
+ { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12)));
+ { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16)));
+ { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12))))));
+ { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17)));
+ { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17)))));
+ { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17)));
+ { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17))))));
+ { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17)));
+ { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17)));
+ { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17)));
+ { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17));
+ { u64 x36 = ((u64)(0x2 * x18) * x17);
+ { u64 x37 = ((u64)(0x2 * x17) * x17);
+ { u64 x38 = (x27 + (x37 << 0x4));
+ { u64 x39 = (x38 + (x37 << 0x1));
+ { u64 x40 = (x39 + x37);
+ { u64 x41 = (x26 + (x36 << 0x4));
+ { u64 x42 = (x41 + (x36 << 0x1));
+ { u64 x43 = (x42 + x36);
+ { u64 x44 = (x25 + (x35 << 0x4));
+ { u64 x45 = (x44 + (x35 << 0x1));
+ { u64 x46 = (x45 + x35);
+ { u64 x47 = (x24 + (x34 << 0x4));
+ { u64 x48 = (x47 + (x34 << 0x1));
+ { u64 x49 = (x48 + x34);
+ { u64 x50 = (x23 + (x33 << 0x4));
+ { u64 x51 = (x50 + (x33 << 0x1));
+ { u64 x52 = (x51 + x33);
+ { u64 x53 = (x22 + (x32 << 0x4));
+ { u64 x54 = (x53 + (x32 << 0x1));
+ { u64 x55 = (x54 + x32);
+ { u64 x56 = (x21 + (x31 << 0x4));
+ { u64 x57 = (x56 + (x31 << 0x1));
+ { u64 x58 = (x57 + x31);
+ { u64 x59 = (x20 + (x30 << 0x4));
+ { u64 x60 = (x59 + (x30 << 0x1));
+ { u64 x61 = (x60 + x30);
+ { u64 x62 = (x19 + (x29 << 0x4));
+ { u64 x63 = (x62 + (x29 << 0x1));
+ { u64 x64 = (x63 + x29);
+ { u64 x65 = (x64 >> 0x1a);
+ { u32 x66 = ((u32)x64 & 0x3ffffff);
+ { u64 x67 = (x65 + x61);
+ { u64 x68 = (x67 >> 0x19);
+ { u32 x69 = ((u32)x67 & 0x1ffffff);
+ { u64 x70 = (x68 + x58);
+ { u64 x71 = (x70 >> 0x1a);
+ { u32 x72 = ((u32)x70 & 0x3ffffff);
+ { u64 x73 = (x71 + x55);
+ { u64 x74 = (x73 >> 0x19);
+ { u32 x75 = ((u32)x73 & 0x1ffffff);
+ { u64 x76 = (x74 + x52);
+ { u64 x77 = (x76 >> 0x1a);
+ { u32 x78 = ((u32)x76 & 0x3ffffff);
+ { u64 x79 = (x77 + x49);
+ { u64 x80 = (x79 >> 0x19);
+ { u32 x81 = ((u32)x79 & 0x1ffffff);
+ { u64 x82 = (x80 + x46);
+ { u64 x83 = (x82 >> 0x1a);
+ { u32 x84 = ((u32)x82 & 0x3ffffff);
+ { u64 x85 = (x83 + x43);
+ { u64 x86 = (x85 >> 0x19);
+ { u32 x87 = ((u32)x85 & 0x1ffffff);
+ { u64 x88 = (x86 + x40);
+ { u64 x89 = (x88 >> 0x1a);
+ { u32 x90 = ((u32)x88 & 0x3ffffff);
+ { u64 x91 = (x89 + x28);
+ { u64 x92 = (x91 >> 0x19);
+ { u32 x93 = ((u32)x91 & 0x1ffffff);
+ { u64 x94 = (x66 + (0x13 * x92));
+ { u32 x95 = (u32) (x94 >> 0x1a);
+ { u32 x96 = ((u32)x94 & 0x3ffffff);
+ { u32 x97 = (x95 + x69);
+ { u32 x98 = (x97 >> 0x19);
+ { u32 x99 = (x97 & 0x1ffffff);
+ out[0] = x96;
+ out[1] = x99;
+ out[2] = (x98 + x72);
+ out[3] = x75;
+ out[4] = x78;
+ out[5] = x81;
+ out[6] = x84;
+ out[7] = x87;
+ out[8] = x90;
+ out[9] = x93;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_sq_tt(fe *h, const fe *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
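+/* Field inversion via Fermat's little theorem: a fixed square-and-multiply
+ * chain computing z^(p-2) mod p for p = 2^255-19, which also gives
+ * fe_invert(0) = 0 as required by the ladder below.
+ */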
+static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq_tl(&t0, z);
+ fe_sq_tt(&t1, &t0);
+ for (i = 1; i < 2; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_tlt(&t1, z, &t1);
+ fe_mul_ttt(&t0, &t0, &t1);
+ fe_sq_tt(&t2, &t0);
+ fe_mul_ttt(&t1, &t1, &t2);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 20; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 100; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t1, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_ttt(out, &t1, &t0);
+}
+
+static __always_inline void fe_invert(fe *out, const fe *z)
+{
+ fe_loose l;
+ fe_copy_lt(&l, z);
+ fe_loose_invert(out, &l);
+}
+
+/* Replace (f,g) with (g,f) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}
+ */
+static noinline void fe_cswap(fe *f, fe *g, unsigned int b)
+{
+ unsigned i;
+ b = 0 - b;
+ for (i = 0; i < 10; i++) {
+ u32 x = f->v[i] ^ g->v[i];
+ x &= b;
+ f->v[i] ^= x;
+ g->v[i] ^= x;
+ }
+}
+
+/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0. */
+static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = 0;
+ { const u32 x39 = 0;
+ { const u32 x37 = 0;
+ { const u32 x35 = 0;
+ { const u32 x33 = 0;
+ { const u32 x31 = 0;
+ { const u32 x29 = 0;
+ { const u32 x27 = 0;
+ { const u32 x25 = 0;
+ { const u32 x23 = 121666;
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
+{
+ fe_mul_121666_impl(h->v, f->v);
+}
+
+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE])
+{
+ fe x1, x2, z2, x3, z3;
+ fe_loose x2l, z2l, x3l;
+ unsigned swap = 0;
+ int pos;
+ u8 e[32];
+
+ memcpy(e, scalar, 32);
+ curve25519_clamp_secret(e);
+
+ /* The following implementation was transcribed to Coq and proven to
+ * correspond to unary scalar multiplication in affine coordinates given
+ * that x1 != 0 is the x coordinate of some point on the curve. It was
+ * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
+ * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
+ * quantified over the underlying field, so it applies to Curve25519
+ * itself and the quadratic twist of Curve25519. It was not proven in
+ * Coq that prime-field arithmetic correctly simulates extension-field
+ * arithmetic on prime-field values. The decoding of the byte array
+ * representation of e was not considered.
+ *
+ * Specification of Montgomery curves in affine coordinates:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
+ *
+ * Proof that these form a group that is isomorphic to a Weierstrass
+ * curve:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
+ *
+ * Coq transcription and correctness proof of the loop
+ * (where scalarbits=255):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
+ * preconditions: 0 <= e < 2^255 (not necessarily e < order),
+ * fe_invert(0) = 0
+ */
+ fe_frombytes(&x1, point);
+ fe_1(&x2);
+ fe_0(&z2);
+ fe_copy(&x3, &x1);
+ fe_1(&z3);
+
+ for (pos = 254; pos >= 0; --pos) {
+ fe tmp0, tmp1;
+ fe_loose tmp0l, tmp1l;
+ /* loop invariant as of right before the test, for the case
+ * where x1 != 0:
+ * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3
+ * is nonzero
+ * let r := e >> (pos+1) in the following equalities of
+ * projective points:
+ * to_xz (r*P) === if swap then (x3, z3) else (x2, z2)
+ * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
+ * x1 is the nonzero x coordinate of the nonzero
+ * point (r*P-(r+1)*P)
+ */
+ unsigned b = 1 & (e[pos / 8] >> (pos & 7));
+ swap ^= b;
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+ swap = b;
+ /* Coq transcription of ladderstep formula (called from
+ * transcribed loop):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
+ * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
+ * x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
+ */
+ fe_sub(&tmp0l, &x3, &z3);
+ fe_sub(&tmp1l, &x2, &z2);
+ fe_add(&x2l, &x2, &z2);
+ fe_add(&z2l, &x3, &z3);
+ fe_mul_tll(&z3, &tmp0l, &x2l);
+ fe_mul_tll(&z2, &z2l, &tmp1l);
+ fe_sq_tl(&tmp0, &tmp1l);
+ fe_sq_tl(&tmp1, &x2l);
+ fe_add(&x3l, &z3, &z2);
+ fe_sub(&z2l, &z3, &z2);
+ fe_mul_ttt(&x2, &tmp1, &tmp0);
+ fe_sub(&tmp1l, &tmp1, &tmp0);
+ fe_sq_tl(&z2, &z2l);
+ fe_mul121666(&z3, &tmp1l);
+ fe_sq_tl(&x3, &x3l);
+ fe_add(&tmp0l, &tmp0, &z3);
+ fe_mul_ttt(&z3, &x1, &z2);
+ fe_mul_tll(&z2, &tmp1l, &tmp0l);
+ }
+ /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3)
+ * else (x2, z2)
+ */
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+
+ fe_invert(&z2, &z2);
+ fe_mul_ttt(&x2, &x2, &z2);
+ fe_tobytes(out, &x2);
+
+ memzero_explicit(&x1, sizeof(x1));
+ memzero_explicit(&x2, sizeof(x2));
+ memzero_explicit(&z2, sizeof(z2));
+ memzero_explicit(&x3, sizeof(x3));
+ memzero_explicit(&z3, sizeof(z3));
+ memzero_explicit(&x2l, sizeof(x2l));
+ memzero_explicit(&z2l, sizeof(z2l));
+ memzero_explicit(&x3l, sizeof(x3l));
+ memzero_explicit(&e, sizeof(e));
+}
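
For reference, the deferred-swap pattern in the ladder above (swap ^= b; fe_cswap(...); swap = b)
keeps track of whether the two working points are currently exchanged, so each iteration issues a
single pair of fe_cswap calls that simultaneously undoes the previous bit's arrangement and applies
the current bit. A minimal standalone sketch of the underlying constant-time swap, in plain C with
illustrative names (not taken from this patch):

	#include <stdint.h>

	/* Exchange *a and *b when bit == 1, leave them alone when bit == 0,
	 * without any branch on the (secret) bit. */
	static void cswap_u32(uint32_t *a, uint32_t *b, uint32_t bit)
	{
		uint32_t mask = 0 - bit;	/* all ones iff bit == 1 */
		uint32_t x = mask & (*a ^ *b);

		*a ^= x;
		*b ^= x;
	}

The fe_cswap used by the ladder applies the same mask-and-xor idea limb by limb to whole field
elements.
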
diff --git a/lib/crypto/curve25519-generic.c b/lib/crypto/curve25519-generic.c
new file mode 100644
index 000000000000..de7c99172fa2
--- /dev/null
+++ b/lib/crypto/curve25519-generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+
+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
+
+EXPORT_SYMBOL(curve25519_null_point);
+EXPORT_SYMBOL(curve25519_base_point);
+EXPORT_SYMBOL(curve25519_generic);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
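
As a usage sketch only (illustrative function name, error handling reduced to a bare comparison):
a caller can derive a Curve25519 public key from a 32-byte secret by multiplying the base point
exported above. The scalar is clamped inside curve25519_generic() itself, and in-tree users would
normally go through the wrappers in <crypto/curve25519.h> rather than call the generic routine
directly.

	#include <crypto/curve25519.h>
	#include <linux/string.h>

	static bool example_derive_public(u8 pub[CURVE25519_KEY_SIZE],
					  const u8 secret[CURVE25519_KEY_SIZE])
	{
		curve25519_generic(pub, secret, curve25519_base_point);

		/* An all-zero result signals a low-order input point; it cannot
		 * occur for the base point, but the check mirrors what callers
		 * are expected to do for arbitrary peer public keys. */
		return memcmp(pub, curve25519_null_point, CURVE25519_KEY_SIZE) != 0;
	}
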
diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c
new file mode 100644
index 000000000000..c40e5d913234
--- /dev/null
+++ b/lib/crypto/curve25519-hacl64.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine
+ * generated, it has been tweaked to be suitable for use in the kernel. It is
+ * optimized for 64-bit machines that can efficiently work with 128-bit
+ * integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+static __always_inline u64 u64_eq_mask(u64 a, u64 b)
+{
+ u64 x = a ^ b;
+ u64 minus_x = ~x + (u64)1U;
+ u64 x_or_minus_x = x | minus_x;
+ u64 xnx = x_or_minus_x >> (u32)63U;
+ u64 c = xnx - (u64)1U;
+ return c;
+}
+
+static __always_inline u64 u64_gte_mask(u64 a, u64 b)
+{
+ u64 x = a;
+ u64 y = b;
+ u64 x_xor_y = x ^ y;
+ u64 x_sub_y = x - y;
+ u64 x_sub_y_xor_y = x_sub_y ^ y;
+ u64 q = x_xor_y | x_sub_y_xor_y;
+ u64 x_xor_q = x ^ q;
+ u64 x_xor_q_ = x_xor_q >> (u32)63U;
+ u64 c = x_xor_q_ - (u64)1U;
+ return c;
+}
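
Both helpers above return branch-free masks rather than booleans: u64_eq_mask(a, b) is ~0ULL when
a == b and 0 otherwise, and u64_gte_mask(a, b) is ~0ULL when a >= b and 0 otherwise. That lets
later code select between values without a secret-dependent branch, along the lines of this
illustrative helper (not part of the patch):

	static __always_inline u64 ct_select_eq(u64 x, u64 y, u64 a, u64 b)
	{
		u64 mask = u64_eq_mask(a, b);

		/* mask is all ones or all zeros, so this picks x when a == b
		 * and y otherwise, touching both operands either way. */
		return (x & mask) | (y & ~mask);
	}
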
+
+static __always_inline void modulo_carry_top(u64 *b)
+{
+ u64 b4 = b[4];
+ u64 b0 = b[0];
+ u64 b4_ = b4 & 0x7ffffffffffffLLU;
+ u64 b0_ = b0 + 19 * (b4 >> 51);
+ b[4] = b4_;
+ b[0] = b0_;
+}
+
+static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
+{
+ {
+ u128 xi = input[0];
+ output[0] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[1];
+ output[1] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[2];
+ output[2] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[3];
+ output[3] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[4];
+ output[4] = ((u64)(xi));
+ }
+}
+
+static __always_inline void
+fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
+{
+ output[0] += (u128)input[0] * s;
+ output[1] += (u128)input[1] * s;
+ output[2] += (u128)input[2] * s;
+ output[3] += (u128)input[3] * s;
+ output[4] += (u128)input[4] * s;
+}
+
+static __always_inline void fproduct_carry_wide_(u128 *tmp)
+{
+ {
+ u32 ctr = 0;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 1;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+
+ {
+ u32 ctr = 2;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 3;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+}
+
+static __always_inline void fmul_shift_reduce(u64 *output)
+{
+ u64 tmp = output[4];
+ u64 b0;
+ {
+ u32 ctr = 5 - 0 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 1 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 2 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 3 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+ b0 = output[0];
+ output[0] = 19 * b0;
+}
+
+static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input,
+ u64 *input21)
+{
+ u32 i;
+ u64 input2i;
+ {
+ u64 input2i = input21[0];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[1];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[2];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[3];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ i = 4;
+ input2i = input21[i];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
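
fmul_mul_shift_reduce_() is the schoolbook multiply in this representation: for each limb of
input21 it accumulates input * input21[i] into the 128-bit accumulators, then multiplies input by
2^51 modulo p via fmul_shift_reduce(), which rotates the limbs up one position and scales the limb
that wraps around from the top by 19 (since 2^255 is congruent to 19 mod p). After the fifth limb
no further shift is needed.
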
+
+static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
+{
+ u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
+ {
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ u128 t[5] = { 0 };
+ fmul_mul_shift_reduce_(t, tmp, input21);
+ fproduct_carry_wide_(t);
+ b4 = t[4];
+ b0 = t[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ t[4] = b4_;
+ t[0] = b0_;
+ fproduct_copy_from_wide_(output, t);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+ }
+}
+
+static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
+{
+ u64 r0 = output[0];
+ u64 r1 = output[1];
+ u64 r2 = output[2];
+ u64 r3 = output[3];
+ u64 r4 = output[4];
+ u64 d0 = r0 * 2;
+ u64 d1 = r1 * 2;
+ u64 d2 = r2 * 2 * 19;
+ u64 d419 = r4 * 19;
+ u64 d4 = d419 * 2;
+ u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) +
+ (((u128)(d2) * (r3))));
+ u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) +
+ (((u128)(r3 * 19) * (r3))));
+ u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) +
+ (((u128)(d4) * (r3))));
+ u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) +
+ (((u128)(r4) * (d419))));
+ u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) +
+ (((u128)(r2) * (r2))));
+ tmp[0] = s0;
+ tmp[1] = s1;
+ tmp[2] = s2;
+ tmp[3] = s3;
+ tmp[4] = s4;
+}
+
+static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
+{
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ fsquare_fsquare__(tmp, output);
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp,
+ u32 count1)
+{
+ u32 i;
+ fsquare_fsquare_(tmp, output);
+ for (i = 1; i < count1; ++i)
+ fsquare_fsquare_(tmp, output);
+}
+
+static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input,
+ u32 count1)
+{
+ u128 t[5];
+ memcpy(output, input, 5 * sizeof(*input));
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void fsquare_fsquare_times_inplace(u64 *output,
+ u32 count1)
+{
+ u128 t[5];
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void crecip_crecip(u64 *out, u64 *z)
+{
+ u64 buf[20] = { 0 };
+ u64 *a0 = buf;
+ u64 *t00 = buf + 5;
+ u64 *b0 = buf + 10;
+ u64 *t01;
+ u64 *b1;
+ u64 *c0;
+ u64 *a;
+ u64 *t0;
+ u64 *b;
+ u64 *c;
+ fsquare_fsquare_times(a0, z, 1);
+ fsquare_fsquare_times(t00, a0, 2);
+ fmul_fmul(b0, t00, z);
+ fmul_fmul(a0, b0, a0);
+ fsquare_fsquare_times(t00, a0, 1);
+ fmul_fmul(b0, t00, b0);
+ fsquare_fsquare_times(t00, b0, 5);
+ t01 = buf + 5;
+ b1 = buf + 10;
+ c0 = buf + 15;
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 10);
+ fmul_fmul(c0, t01, b1);
+ fsquare_fsquare_times(t01, c0, 20);
+ fmul_fmul(t01, t01, c0);
+ fsquare_fsquare_times_inplace(t01, 10);
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 50);
+ a = buf;
+ t0 = buf + 5;
+ b = buf + 10;
+ c = buf + 15;
+ fmul_fmul(c, t0, b);
+ fsquare_fsquare_times(t0, c, 100);
+ fmul_fmul(t0, t0, c);
+ fsquare_fsquare_times_inplace(t0, 50);
+ fmul_fmul(t0, t0, b);
+ fsquare_fsquare_times_inplace(t0, 5);
+ fmul_fmul(out, t0, a);
+}
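
crecip_crecip() is a Fermat inversion: it raises z to the power p - 2 using a fixed
square-and-multiply addition chain (the 1, 2, 5, 10, 20, 50, 100 squaring blocks above), so the
reciprocal is computed in constant time with no data-dependent branches and 0 is mapped to 0, the
same fe_invert(0) = 0 behaviour the fiat32 file relies on.
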
+
+static __always_inline void fsum(u64 *a, u64 *b)
+{
+ a[0] += b[0];
+ a[1] += b[1];
+ a[2] += b[2];
+ a[3] += b[3];
+ a[4] += b[4];
+}
+
+static __always_inline void fdifference(u64 *a, u64 *b)
+{
+ u64 tmp[5] = { 0 };
+ u64 b0;
+ u64 b1;
+ u64 b2;
+ u64 b3;
+ u64 b4;
+ memcpy(tmp, b, 5 * sizeof(*b));
+ b0 = tmp[0];
+ b1 = tmp[1];
+ b2 = tmp[2];
+ b3 = tmp[3];
+ b4 = tmp[4];
+ tmp[0] = b0 + 0x3fffffffffff68LLU;
+ tmp[1] = b1 + 0x3ffffffffffff8LLU;
+ tmp[2] = b2 + 0x3ffffffffffff8LLU;
+ tmp[3] = b3 + 0x3ffffffffffff8LLU;
+ tmp[4] = b4 + 0x3ffffffffffff8LLU;
+ {
+ u64 xi = a[0];
+ u64 yi = tmp[0];
+ a[0] = yi - xi;
+ }
+ {
+ u64 xi = a[1];
+ u64 yi = tmp[1];
+ a[1] = yi - xi;
+ }
+ {
+ u64 xi = a[2];
+ u64 yi = tmp[2];
+ a[2] = yi - xi;
+ }
+ {
+ u64 xi = a[3];
+ u64 yi = tmp[3];
+ a[3] = yi - xi;
+ }
+ {
+ u64 xi = a[4];
+ u64 yi = tmp[4];
+ a[4] = yi - xi;
+ }
+}
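
The constants added to tmp[] above are 8*p written limbwise in radix 2^51. With

	p = 2^255 - 19
	  = (2^51 - 19) + (2^51 - 1)*2^51 + (2^51 - 1)*2^102
	    + (2^51 - 1)*2^153 + (2^51 - 1)*2^204

multiplying every limb by 8 gives 0x3fffffffffff68 = 8 * (2^51 - 19) for limb 0 and
0x3ffffffffffff8 = 8 * (2^51 - 1) for limbs 1..4. Adding a multiple of p leaves the value
unchanged modulo p while guaranteeing that each per-limb difference yi - xi cannot wrap around,
so fdifference() computes b - a without borrows.
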
+
+static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
+{
+ u128 tmp[5];
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ {
+ u64 xi = b[0];
+ tmp[0] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[1];
+ tmp[1] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[2];
+ tmp[2] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[3];
+ tmp[3] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[4];
+ tmp[4] = ((u128)(xi) * (s));
+ }
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+}
+
+static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
+{
+ fmul_fmul(output, a, b);
+}
+
+static __always_inline void crecip(u64 *output, u64 *input)
+{
+ crecip_crecip(output, input);
+}
+
+static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
+ u64 swap1, u32 ctr)
+{
+ u32 i = ctr - 1;
+ u64 ai = a[i];
+ u64 bi = b[i];
+ u64 x = swap1 & (ai ^ bi);
+ u64 ai1 = ai ^ x;
+ u64 bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1)
+{
+ point_swap_conditional_step(a, b, swap1, 5);
+ point_swap_conditional_step(a, b, swap1, 4);
+ point_swap_conditional_step(a, b, swap1, 3);
+ point_swap_conditional_step(a, b, swap1, 2);
+ point_swap_conditional_step(a, b, swap1, 1);
+}
+
+static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
+{
+ u64 swap1 = 0 - iswap;
+ point_swap_conditional5(a, b, swap1);
+ point_swap_conditional5(a + 5, b + 5, swap1);
+}
+
+static __always_inline void point_copy(u64 *output, u64 *input)
+{
+ memcpy(output, input, 5 * sizeof(*input));
+ memcpy(output + 5, input + 5, 5 * sizeof(*input));
+}
+
+static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p,
+ u64 *pq, u64 *qmqp)
+{
+ u64 *qx = qmqp;
+ u64 *x2 = pp;
+ u64 *z2 = pp + 5;
+ u64 *x3 = ppq;
+ u64 *z3 = ppq + 5;
+ u64 *x = p;
+ u64 *z = p + 5;
+ u64 *xprime = pq;
+ u64 *zprime = pq + 5;
+ u64 buf[40] = { 0 };
+ u64 *origx = buf;
+ u64 *origxprime0 = buf + 5;
+ u64 *xxprime0;
+ u64 *zzprime0;
+ u64 *origxprime;
+ xxprime0 = buf + 25;
+ zzprime0 = buf + 30;
+ memcpy(origx, x, 5 * sizeof(*x));
+ fsum(x, z);
+ fdifference(z, origx);
+ memcpy(origxprime0, xprime, 5 * sizeof(*xprime));
+ fsum(xprime, zprime);
+ fdifference(zprime, origxprime0);
+ fmul(xxprime0, xprime, z);
+ fmul(zzprime0, x, zprime);
+ origxprime = buf + 5;
+ {
+ u64 *xx0;
+ u64 *zz0;
+ u64 *xxprime;
+ u64 *zzprime;
+ u64 *zzzprime;
+ xx0 = buf + 15;
+ zz0 = buf + 20;
+ xxprime = buf + 25;
+ zzprime = buf + 30;
+ zzzprime = buf + 35;
+ memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
+ fsum(xxprime, zzprime);
+ fdifference(zzprime, origxprime);
+ fsquare_fsquare_times(x3, xxprime, 1);
+ fsquare_fsquare_times(zzzprime, zzprime, 1);
+ fmul(z3, zzzprime, qx);
+ fsquare_fsquare_times(xx0, x, 1);
+ fsquare_fsquare_times(zz0, z, 1);
+ {
+ u64 *zzz;
+ u64 *xx;
+ u64 *zz;
+ u64 scalar;
+ zzz = buf + 10;
+ xx = buf + 15;
+ zz = buf + 20;
+ fmul(x2, xx, zz);
+ fdifference(zz, xx);
+ scalar = 121665;
+ fscalar(zzz, zz, scalar);
+ fsum(zzz, xx);
+ fmul(z2, zzz, zz);
+ }
+ }
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt)
+{
+ u64 bit0 = (u64)(byt >> 7);
+ u64 bit;
+ point_swap_conditional(nq, nqpq, bit0);
+ addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+ bit = (u64)(byt >> 7);
+ point_swap_conditional(nq2, nqpq2, bit);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q, u8 byt)
+{
+ u8 byt1;
+ ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ byt1 = byt << 1;
+ ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt, u32 i)
+{
+ while (i--) {
+ ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,
+ nqpq2, q, byt);
+ byt <<= 2;
+ }
+}
+
+static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,
+ u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q,
+ u32 i)
+{
+ while (i--) {
+ u8 byte = n1[i];
+ ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
+ byte, 4);
+ }
+}
+
+static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
+{
+ u64 point_buf[40] = { 0 };
+ u64 *nq = point_buf;
+ u64 *nqpq = point_buf + 10;
+ u64 *nq2 = point_buf + 20;
+ u64 *nqpq2 = point_buf + 30;
+ point_copy(nqpq, q);
+ nq[0] = 1;
+ ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
+ point_copy(result, nq);
+}
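
Taken together, these three routines walk the clamped scalar most-significant byte first:
ladder_bigloop_cmult_big_loop() consumes n1[31] down to n1[0], each byte drives four double-steps,
and each double-step peels two bits off the top of the byte (byt >> 7, then byt << 1, with
byt <<= 2 between double-steps), so all 256 bit positions are processed in the same fixed order
regardless of the scalar value. Because curve25519_clamp_secret() clears bit 255, the first bit
processed is always zero.
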
+
+static __always_inline void format_fexpand(u64 *output, const u8 *input)
+{
+ const u8 *x00 = input + 6;
+ const u8 *x01 = input + 12;
+ const u8 *x02 = input + 19;
+ const u8 *x0 = input + 24;
+ u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
+ i0 = get_unaligned_le64(input);
+ i1 = get_unaligned_le64(x00);
+ i2 = get_unaligned_le64(x01);
+ i3 = get_unaligned_le64(x02);
+ i4 = get_unaligned_le64(x0);
+ output0 = i0 & 0x7ffffffffffffLLU;
+ output1 = i1 >> 3 & 0x7ffffffffffffLLU;
+ output2 = i2 >> 6 & 0x7ffffffffffffLLU;
+ output3 = i3 >> 1 & 0x7ffffffffffffLLU;
+ output4 = i4 >> 12 & 0x7ffffffffffffLLU;
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
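
The byte offsets and shift counts in format_fexpand() follow directly from the 5 x 51-bit limb
layout: limb i holds bits 51*i .. 51*i + 50 of the little-endian integer, and each unaligned
64-bit load starts at the byte containing bit 51*i and is shifted down to that bit:

	limb  bits held     load offset (bytes)  bit of load  right shift
	 0      0 ..  50         0                    0            0
	 1     51 .. 101         6                   48            3
	 2    102 .. 152        12                   96            6
	 3    153 .. 203        19                  152            1
	 4    204 .. 254        24                  192           12

The final 51-bit mask on limb 4 also discards bit 255, matching the X25519 convention of ignoring
the top bit of the incoming u-coordinate.
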
+
+static __always_inline void format_fcontract_first_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_first_carry_full(u64 *input)
+{
+ format_fcontract_first_carry_pass(input);
+ modulo_carry_top(input);
+}
+
+static __always_inline void format_fcontract_second_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_second_carry_full(u64 *input)
+{
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ format_fcontract_second_carry_pass(input);
+ modulo_carry_top(input);
+ i0 = input[0];
+ i1 = input[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ input[0] = i0_;
+ input[1] = i1_;
+}
+
+static __always_inline void format_fcontract_trim(u64 *input)
+{
+ u64 a0 = input[0];
+ u64 a1 = input[1];
+ u64 a2 = input[2];
+ u64 a3 = input[3];
+ u64 a4 = input[4];
+ u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU);
+ u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU);
+ u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU);
+ u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU);
+ u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU);
+ u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+ u64 a0_ = a0 - (0x7ffffffffffedLLU & mask);
+ u64 a1_ = a1 - (0x7ffffffffffffLLU & mask);
+ u64 a2_ = a2 - (0x7ffffffffffffLLU & mask);
+ u64 a3_ = a3 - (0x7ffffffffffffLLU & mask);
+ u64 a4_ = a4 - (0x7ffffffffffffLLU & mask);
+ input[0] = a0_;
+ input[1] = a1_;
+ input[2] = a2_;
+ input[3] = a3_;
+ input[4] = a4_;
+}
+
+static __always_inline void format_fcontract_store(u8 *output, u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 o0 = t1 << 51 | t0;
+ u64 o1 = t2 << 38 | t1 >> 13;
+ u64 o2 = t3 << 25 | t2 >> 26;
+ u64 o3 = t4 << 12 | t3 >> 39;
+ u8 *b0 = output;
+ u8 *b1 = output + 8;
+ u8 *b2 = output + 16;
+ u8 *b3 = output + 24;
+ put_unaligned_le64(o0, b0);
+ put_unaligned_le64(o1, b1);
+ put_unaligned_le64(o2, b2);
+ put_unaligned_le64(o3, b3);
+}
+
+static __always_inline void format_fcontract(u8 *output, u64 *input)
+{
+ format_fcontract_first_carry_full(input);
+ format_fcontract_second_carry_full(input);
+ format_fcontract_trim(input);
+ format_fcontract_store(output, input);
+}
+
+static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
+{
+ u64 *x = point;
+ u64 *z = point + 5;
+ u64 buf[10] __aligned(32) = { 0 };
+ u64 *zmone = buf;
+ u64 *sc = buf + 5;
+ crecip(zmone, z);
+ fmul(sc, x, zmone);
+ format_fcontract(scalar, sc);
+}
+
+void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ u64 buf0[10] __aligned(32) = { 0 };
+ u64 *x0 = buf0;
+ u64 *z = buf0 + 5;
+ u64 *q;
+ format_fexpand(x0, basepoint);
+ z[0] = 1;
+ q = buf0;
+ {
+ u8 e[32] __aligned(32) = { 0 };
+ u8 *scalar;
+ memcpy(e, secret, 32);
+ curve25519_clamp_secret(e);
+ scalar = e;
+ {
+ u64 buf[15] = { 0 };
+ u64 *nq = buf;
+ u64 *x = nq;
+ x[0] = 1;
+ ladder_cmult(nq, scalar, q);
+ format_scalar_of_point(mypublic, nq);
+ memzero_explicit(buf, sizeof(buf));
+ }
+ memzero_explicit(e, sizeof(e));
+ }
+ memzero_explicit(buf0, sizeof(buf0));
+}
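
A note on the recurring "* 19" folds in this file (modulo_carry_top() and the
b0 + 19 * (b4 >> 51) steps): with p = 2^255 - 19 and five 51-bit limbs, 5 * 51 = 255, so a carry c
out of limb 4 stands for c * 2^255, and since 2^255 is congruent to 19 (mod p) it can be folded
back as 19 * c into limb 0. A minimal standalone sketch of one such fold, in plain C with an
illustrative name (not taken from this file):

	#include <stdint.h>

	static void fold_top_limb(uint64_t limb[5])
	{
		uint64_t carry = limb[4] >> 51;		/* bits at and above 2^255 */

		limb[4] &= 0x7ffffffffffffULL;		/* keep bits 204..254 */
		limb[0] += 19 * carry;			/* 2^255 * c == 19 * c (mod p) */
	}
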
diff --git a/lib/crypto/curve25519-selftest.c b/lib/crypto/curve25519-selftest.c
new file mode 100644
index 000000000000..c85e85381e78
--- /dev/null
+++ b/lib/crypto/curve25519-selftest.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/curve25519.h>
+
+struct curve25519_test_vector {
+ u8 private[CURVE25519_KEY_SIZE];
+ u8 public[CURVE25519_KEY_SIZE];
+ u8 result[CURVE25519_KEY_SIZE];
+ bool valid;
+};
+static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = {
+ {
+ .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
+ .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
+ 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
+ .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
+ 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
+ .valid = true
+ },
+ {
+ .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ {
+ .private = { 1, 2, 3, 4 },
+ .public = { 0 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 2, 4, 6, 8 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
+ .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
+ .valid = true
+ },
+ {
+ .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
+ .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
+ .valid = true
+ },
+ /* wycheproof - normal case */
+ {
+ .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
+ 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
+ .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
+ .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
+ .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
+ .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
+ .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
+ .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
+ .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
+ .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
+ 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
+ .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
+ .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
+ .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
+ .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
+ .valid = true
+ },
+ /* wycheproof - public key = 0 */
+ {
+ .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11,
+ 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac,
+ 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b,
+ 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key = 1 */
+ {
+ .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61,
+ 0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea,
+ 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f,
+ 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
+ .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
+ .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
+ 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
+ .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
+ .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
+ 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
+ .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
+ .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
+ .valid = true
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30,
+ 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69,
+ 0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14,
+ 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3,
+ 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b,
+ 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef,
+ 0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20,
+ 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf,
+ 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43,
+ 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f,
+ 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65,
+ 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06,
+ 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe,
+ 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9,
+ 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f,
+ 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8,
+ 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85,
+ 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c,
+ 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8,
+ 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d,
+ 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0,
+ 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a,
+ 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b,
+ 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67,
+ 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46,
+ 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02,
+ 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3,
+ 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30,
+ 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1,
+ 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6,
+ 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f,
+ 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77,
+ 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0,
+ 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e,
+ 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f,
+ 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77,
+ 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
+ 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
+ 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
+ .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
+ .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
+ .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
+ 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
+ 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
+ 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
+ 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
+ .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
+ .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
+ .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
+ 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
+ .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
+ .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
+ .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
+ .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
+ .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
+ .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
+ .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
+ .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
+ .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
+ .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
+ 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
+ .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
+ 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
+ .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
+ .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
+ .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
+ .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
+ .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
+ 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - private key == -1 (mod order) */
+ {
+ .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
+ .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .valid = true
+ },
+ /* wycheproof - private key == 1 (mod order) on twist */
+ {
+ .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
+ .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .valid = true
+ }
+};
+
+bool __init curve25519_selftest(void)
+{
+ bool success = true, ret, ret2;
+ size_t i = 0, j;
+ u8 in[CURVE25519_KEY_SIZE];
+ u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE],
+ out3[CURVE25519_KEY_SIZE];
+
+ for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) {
+ memset(out, 0, CURVE25519_KEY_SIZE);
+ ret = curve25519(out, curve25519_test_vectors[i].private,
+ curve25519_test_vectors[i].public);
+ if (ret != curve25519_test_vectors[i].valid ||
+ memcmp(out, curve25519_test_vectors[i].result,
+ CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 self-test %zu: FAIL\n", i + 1);
+ success = false;
+ }
+ }
+
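+	/*
+	 * Cross-check with random inputs: the public-key helper, the
+	 * dispatching curve25519() applied to the basepoint (x = 9), and
+	 * the generic implementation must all produce the same output.
+	 */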
+ for (i = 0; i < 5; ++i) {
+ get_random_bytes(in, sizeof(in));
+ ret = curve25519_generate_public(out, in);
+ ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ curve25519_generic(out3, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ if (ret != ret2 ||
+ memcmp(out, out2, CURVE25519_KEY_SIZE) ||
+ memcmp(out, out3, CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x",
+ i + 1);
+ for (j = CURVE25519_KEY_SIZE; j-- > 0;)
+ printk(KERN_CONT "%02x", in[j]);
+ printk(KERN_CONT "\n");
+ success = false;
+ }
+ }
+
+ return success;
+}
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
new file mode 100644
index 000000000000..064b352c6907
--- /dev/null
+++ b/lib/crypto/curve25519.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
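+
+/*
+ * A minimal usage sketch (illustrative only; "secret" is assumed to hold
+ * CURVE25519_KEY_SIZE random bytes and "peer_pub" the remote public key):
+ *
+ *	u8 pub[CURVE25519_KEY_SIZE], shared[CURVE25519_KEY_SIZE];
+ *
+ *	if (!curve25519_generate_public(pub, secret))
+ *		goto reject;
+ *	if (!curve25519(shared, secret, peer_pub))
+ *		goto reject;
+ *
+ * As exercised by curve25519_selftest(), both helpers return false when
+ * the computed value is all zeros, and such results must be rejected.
+ */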
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int __init curve25519_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!curve25519_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit curve25519_exit(void)
+{
+}
+
+module_init(curve25519_init);
+module_exit(curve25519_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/des.c b/lib/crypto/des.c
new file mode 100644
index 000000000000..ef5bb8822aba
--- /dev/null
+++ b/lib/crypto/des.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * DES & Triple DES EDE Cipher Algorithms.
+ *
+ * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no>
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/fips.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include <crypto/des.h>
+#include <crypto/internal/des.h>
+
+#define ROL(x, r) ((x) = rol32((x), (r)))
+#define ROR(x, r) ((x) = ror32((x), (r)))
+
+/* Lookup tables for key expansion */
+
+static const u8 pc1[256] = {
+ 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14,
+ 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54,
+ 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16,
+ 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56,
+ 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c,
+ 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c,
+ 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e,
+ 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e,
+ 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34,
+ 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74,
+ 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36,
+ 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76,
+ 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c,
+ 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c,
+ 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e,
+ 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e,
+ 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94,
+ 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4,
+ 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96,
+ 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6,
+ 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c,
+ 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc,
+ 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e,
+ 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde,
+ 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4,
+ 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4,
+ 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6,
+ 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6,
+ 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc,
+ 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc,
+ 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe,
+ 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe
+};
+
+static const u8 rs[256] = {
+ 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82,
+ 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86,
+ 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a,
+ 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e,
+ 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92,
+ 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96,
+ 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a,
+ 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e,
+ 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2,
+ 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6,
+ 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa,
+ 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae,
+ 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2,
+ 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6,
+ 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba,
+ 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe,
+ 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2,
+ 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6,
+ 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca,
+ 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce,
+ 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2,
+ 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6,
+ 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda,
+ 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde,
+ 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2,
+ 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6,
+ 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea,
+ 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee,
+ 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2,
+ 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6,
+ 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa,
+ 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe
+};
+
+static const u32 pc2[1024] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00040000, 0x00000000, 0x04000000, 0x00100000,
+ 0x00400000, 0x00000008, 0x00000800, 0x40000000,
+ 0x00440000, 0x00000008, 0x04000800, 0x40100000,
+ 0x00000400, 0x00000020, 0x08000000, 0x00000100,
+ 0x00040400, 0x00000020, 0x0c000000, 0x00100100,
+ 0x00400400, 0x00000028, 0x08000800, 0x40000100,
+ 0x00440400, 0x00000028, 0x0c000800, 0x40100100,
+ 0x80000000, 0x00000010, 0x00000000, 0x00800000,
+ 0x80040000, 0x00000010, 0x04000000, 0x00900000,
+ 0x80400000, 0x00000018, 0x00000800, 0x40800000,
+ 0x80440000, 0x00000018, 0x04000800, 0x40900000,
+ 0x80000400, 0x00000030, 0x08000000, 0x00800100,
+ 0x80040400, 0x00000030, 0x0c000000, 0x00900100,
+ 0x80400400, 0x00000038, 0x08000800, 0x40800100,
+ 0x80440400, 0x00000038, 0x0c000800, 0x40900100,
+ 0x10000000, 0x00000000, 0x00200000, 0x00001000,
+ 0x10040000, 0x00000000, 0x04200000, 0x00101000,
+ 0x10400000, 0x00000008, 0x00200800, 0x40001000,
+ 0x10440000, 0x00000008, 0x04200800, 0x40101000,
+ 0x10000400, 0x00000020, 0x08200000, 0x00001100,
+ 0x10040400, 0x00000020, 0x0c200000, 0x00101100,
+ 0x10400400, 0x00000028, 0x08200800, 0x40001100,
+ 0x10440400, 0x00000028, 0x0c200800, 0x40101100,
+ 0x90000000, 0x00000010, 0x00200000, 0x00801000,
+ 0x90040000, 0x00000010, 0x04200000, 0x00901000,
+ 0x90400000, 0x00000018, 0x00200800, 0x40801000,
+ 0x90440000, 0x00000018, 0x04200800, 0x40901000,
+ 0x90000400, 0x00000030, 0x08200000, 0x00801100,
+ 0x90040400, 0x00000030, 0x0c200000, 0x00901100,
+ 0x90400400, 0x00000038, 0x08200800, 0x40801100,
+ 0x90440400, 0x00000038, 0x0c200800, 0x40901100,
+ 0x00000200, 0x00080000, 0x00000000, 0x00000004,
+ 0x00040200, 0x00080000, 0x04000000, 0x00100004,
+ 0x00400200, 0x00080008, 0x00000800, 0x40000004,
+ 0x00440200, 0x00080008, 0x04000800, 0x40100004,
+ 0x00000600, 0x00080020, 0x08000000, 0x00000104,
+ 0x00040600, 0x00080020, 0x0c000000, 0x00100104,
+ 0x00400600, 0x00080028, 0x08000800, 0x40000104,
+ 0x00440600, 0x00080028, 0x0c000800, 0x40100104,
+ 0x80000200, 0x00080010, 0x00000000, 0x00800004,
+ 0x80040200, 0x00080010, 0x04000000, 0x00900004,
+ 0x80400200, 0x00080018, 0x00000800, 0x40800004,
+ 0x80440200, 0x00080018, 0x04000800, 0x40900004,
+ 0x80000600, 0x00080030, 0x08000000, 0x00800104,
+ 0x80040600, 0x00080030, 0x0c000000, 0x00900104,
+ 0x80400600, 0x00080038, 0x08000800, 0x40800104,
+ 0x80440600, 0x00080038, 0x0c000800, 0x40900104,
+ 0x10000200, 0x00080000, 0x00200000, 0x00001004,
+ 0x10040200, 0x00080000, 0x04200000, 0x00101004,
+ 0x10400200, 0x00080008, 0x00200800, 0x40001004,
+ 0x10440200, 0x00080008, 0x04200800, 0x40101004,
+ 0x10000600, 0x00080020, 0x08200000, 0x00001104,
+ 0x10040600, 0x00080020, 0x0c200000, 0x00101104,
+ 0x10400600, 0x00080028, 0x08200800, 0x40001104,
+ 0x10440600, 0x00080028, 0x0c200800, 0x40101104,
+ 0x90000200, 0x00080010, 0x00200000, 0x00801004,
+ 0x90040200, 0x00080010, 0x04200000, 0x00901004,
+ 0x90400200, 0x00080018, 0x00200800, 0x40801004,
+ 0x90440200, 0x00080018, 0x04200800, 0x40901004,
+ 0x90000600, 0x00080030, 0x08200000, 0x00801104,
+ 0x90040600, 0x00080030, 0x0c200000, 0x00901104,
+ 0x90400600, 0x00080038, 0x08200800, 0x40801104,
+ 0x90440600, 0x00080038, 0x0c200800, 0x40901104,
+ 0x00000002, 0x00002000, 0x20000000, 0x00000001,
+ 0x00040002, 0x00002000, 0x24000000, 0x00100001,
+ 0x00400002, 0x00002008, 0x20000800, 0x40000001,
+ 0x00440002, 0x00002008, 0x24000800, 0x40100001,
+ 0x00000402, 0x00002020, 0x28000000, 0x00000101,
+ 0x00040402, 0x00002020, 0x2c000000, 0x00100101,
+ 0x00400402, 0x00002028, 0x28000800, 0x40000101,
+ 0x00440402, 0x00002028, 0x2c000800, 0x40100101,
+ 0x80000002, 0x00002010, 0x20000000, 0x00800001,
+ 0x80040002, 0x00002010, 0x24000000, 0x00900001,
+ 0x80400002, 0x00002018, 0x20000800, 0x40800001,
+ 0x80440002, 0x00002018, 0x24000800, 0x40900001,
+ 0x80000402, 0x00002030, 0x28000000, 0x00800101,
+ 0x80040402, 0x00002030, 0x2c000000, 0x00900101,
+ 0x80400402, 0x00002038, 0x28000800, 0x40800101,
+ 0x80440402, 0x00002038, 0x2c000800, 0x40900101,
+ 0x10000002, 0x00002000, 0x20200000, 0x00001001,
+ 0x10040002, 0x00002000, 0x24200000, 0x00101001,
+ 0x10400002, 0x00002008, 0x20200800, 0x40001001,
+ 0x10440002, 0x00002008, 0x24200800, 0x40101001,
+ 0x10000402, 0x00002020, 0x28200000, 0x00001101,
+ 0x10040402, 0x00002020, 0x2c200000, 0x00101101,
+ 0x10400402, 0x00002028, 0x28200800, 0x40001101,
+ 0x10440402, 0x00002028, 0x2c200800, 0x40101101,
+ 0x90000002, 0x00002010, 0x20200000, 0x00801001,
+ 0x90040002, 0x00002010, 0x24200000, 0x00901001,
+ 0x90400002, 0x00002018, 0x20200800, 0x40801001,
+ 0x90440002, 0x00002018, 0x24200800, 0x40901001,
+ 0x90000402, 0x00002030, 0x28200000, 0x00801101,
+ 0x90040402, 0x00002030, 0x2c200000, 0x00901101,
+ 0x90400402, 0x00002038, 0x28200800, 0x40801101,
+ 0x90440402, 0x00002038, 0x2c200800, 0x40901101,
+ 0x00000202, 0x00082000, 0x20000000, 0x00000005,
+ 0x00040202, 0x00082000, 0x24000000, 0x00100005,
+ 0x00400202, 0x00082008, 0x20000800, 0x40000005,
+ 0x00440202, 0x00082008, 0x24000800, 0x40100005,
+ 0x00000602, 0x00082020, 0x28000000, 0x00000105,
+ 0x00040602, 0x00082020, 0x2c000000, 0x00100105,
+ 0x00400602, 0x00082028, 0x28000800, 0x40000105,
+ 0x00440602, 0x00082028, 0x2c000800, 0x40100105,
+ 0x80000202, 0x00082010, 0x20000000, 0x00800005,
+ 0x80040202, 0x00082010, 0x24000000, 0x00900005,
+ 0x80400202, 0x00082018, 0x20000800, 0x40800005,
+ 0x80440202, 0x00082018, 0x24000800, 0x40900005,
+ 0x80000602, 0x00082030, 0x28000000, 0x00800105,
+ 0x80040602, 0x00082030, 0x2c000000, 0x00900105,
+ 0x80400602, 0x00082038, 0x28000800, 0x40800105,
+ 0x80440602, 0x00082038, 0x2c000800, 0x40900105,
+ 0x10000202, 0x00082000, 0x20200000, 0x00001005,
+ 0x10040202, 0x00082000, 0x24200000, 0x00101005,
+ 0x10400202, 0x00082008, 0x20200800, 0x40001005,
+ 0x10440202, 0x00082008, 0x24200800, 0x40101005,
+ 0x10000602, 0x00082020, 0x28200000, 0x00001105,
+ 0x10040602, 0x00082020, 0x2c200000, 0x00101105,
+ 0x10400602, 0x00082028, 0x28200800, 0x40001105,
+ 0x10440602, 0x00082028, 0x2c200800, 0x40101105,
+ 0x90000202, 0x00082010, 0x20200000, 0x00801005,
+ 0x90040202, 0x00082010, 0x24200000, 0x00901005,
+ 0x90400202, 0x00082018, 0x20200800, 0x40801005,
+ 0x90440202, 0x00082018, 0x24200800, 0x40901005,
+ 0x90000602, 0x00082030, 0x28200000, 0x00801105,
+ 0x90040602, 0x00082030, 0x2c200000, 0x00901105,
+ 0x90400602, 0x00082038, 0x28200800, 0x40801105,
+ 0x90440602, 0x00082038, 0x2c200800, 0x40901105,
+
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000008, 0x00080000, 0x10000000,
+ 0x02000000, 0x00000000, 0x00000080, 0x00001000,
+ 0x02000000, 0x00000008, 0x00080080, 0x10001000,
+ 0x00004000, 0x00000000, 0x00000040, 0x00040000,
+ 0x00004000, 0x00000008, 0x00080040, 0x10040000,
+ 0x02004000, 0x00000000, 0x000000c0, 0x00041000,
+ 0x02004000, 0x00000008, 0x000800c0, 0x10041000,
+ 0x00020000, 0x00008000, 0x08000000, 0x00200000,
+ 0x00020000, 0x00008008, 0x08080000, 0x10200000,
+ 0x02020000, 0x00008000, 0x08000080, 0x00201000,
+ 0x02020000, 0x00008008, 0x08080080, 0x10201000,
+ 0x00024000, 0x00008000, 0x08000040, 0x00240000,
+ 0x00024000, 0x00008008, 0x08080040, 0x10240000,
+ 0x02024000, 0x00008000, 0x080000c0, 0x00241000,
+ 0x02024000, 0x00008008, 0x080800c0, 0x10241000,
+ 0x00000000, 0x01000000, 0x00002000, 0x00000020,
+ 0x00000000, 0x01000008, 0x00082000, 0x10000020,
+ 0x02000000, 0x01000000, 0x00002080, 0x00001020,
+ 0x02000000, 0x01000008, 0x00082080, 0x10001020,
+ 0x00004000, 0x01000000, 0x00002040, 0x00040020,
+ 0x00004000, 0x01000008, 0x00082040, 0x10040020,
+ 0x02004000, 0x01000000, 0x000020c0, 0x00041020,
+ 0x02004000, 0x01000008, 0x000820c0, 0x10041020,
+ 0x00020000, 0x01008000, 0x08002000, 0x00200020,
+ 0x00020000, 0x01008008, 0x08082000, 0x10200020,
+ 0x02020000, 0x01008000, 0x08002080, 0x00201020,
+ 0x02020000, 0x01008008, 0x08082080, 0x10201020,
+ 0x00024000, 0x01008000, 0x08002040, 0x00240020,
+ 0x00024000, 0x01008008, 0x08082040, 0x10240020,
+ 0x02024000, 0x01008000, 0x080020c0, 0x00241020,
+ 0x02024000, 0x01008008, 0x080820c0, 0x10241020,
+ 0x00000400, 0x04000000, 0x00100000, 0x00000004,
+ 0x00000400, 0x04000008, 0x00180000, 0x10000004,
+ 0x02000400, 0x04000000, 0x00100080, 0x00001004,
+ 0x02000400, 0x04000008, 0x00180080, 0x10001004,
+ 0x00004400, 0x04000000, 0x00100040, 0x00040004,
+ 0x00004400, 0x04000008, 0x00180040, 0x10040004,
+ 0x02004400, 0x04000000, 0x001000c0, 0x00041004,
+ 0x02004400, 0x04000008, 0x001800c0, 0x10041004,
+ 0x00020400, 0x04008000, 0x08100000, 0x00200004,
+ 0x00020400, 0x04008008, 0x08180000, 0x10200004,
+ 0x02020400, 0x04008000, 0x08100080, 0x00201004,
+ 0x02020400, 0x04008008, 0x08180080, 0x10201004,
+ 0x00024400, 0x04008000, 0x08100040, 0x00240004,
+ 0x00024400, 0x04008008, 0x08180040, 0x10240004,
+ 0x02024400, 0x04008000, 0x081000c0, 0x00241004,
+ 0x02024400, 0x04008008, 0x081800c0, 0x10241004,
+ 0x00000400, 0x05000000, 0x00102000, 0x00000024,
+ 0x00000400, 0x05000008, 0x00182000, 0x10000024,
+ 0x02000400, 0x05000000, 0x00102080, 0x00001024,
+ 0x02000400, 0x05000008, 0x00182080, 0x10001024,
+ 0x00004400, 0x05000000, 0x00102040, 0x00040024,
+ 0x00004400, 0x05000008, 0x00182040, 0x10040024,
+ 0x02004400, 0x05000000, 0x001020c0, 0x00041024,
+ 0x02004400, 0x05000008, 0x001820c0, 0x10041024,
+ 0x00020400, 0x05008000, 0x08102000, 0x00200024,
+ 0x00020400, 0x05008008, 0x08182000, 0x10200024,
+ 0x02020400, 0x05008000, 0x08102080, 0x00201024,
+ 0x02020400, 0x05008008, 0x08182080, 0x10201024,
+ 0x00024400, 0x05008000, 0x08102040, 0x00240024,
+ 0x00024400, 0x05008008, 0x08182040, 0x10240024,
+ 0x02024400, 0x05008000, 0x081020c0, 0x00241024,
+ 0x02024400, 0x05008008, 0x081820c0, 0x10241024,
+ 0x00000800, 0x00010000, 0x20000000, 0x00000010,
+ 0x00000800, 0x00010008, 0x20080000, 0x10000010,
+ 0x02000800, 0x00010000, 0x20000080, 0x00001010,
+ 0x02000800, 0x00010008, 0x20080080, 0x10001010,
+ 0x00004800, 0x00010000, 0x20000040, 0x00040010,
+ 0x00004800, 0x00010008, 0x20080040, 0x10040010,
+ 0x02004800, 0x00010000, 0x200000c0, 0x00041010,
+ 0x02004800, 0x00010008, 0x200800c0, 0x10041010,
+ 0x00020800, 0x00018000, 0x28000000, 0x00200010,
+ 0x00020800, 0x00018008, 0x28080000, 0x10200010,
+ 0x02020800, 0x00018000, 0x28000080, 0x00201010,
+ 0x02020800, 0x00018008, 0x28080080, 0x10201010,
+ 0x00024800, 0x00018000, 0x28000040, 0x00240010,
+ 0x00024800, 0x00018008, 0x28080040, 0x10240010,
+ 0x02024800, 0x00018000, 0x280000c0, 0x00241010,
+ 0x02024800, 0x00018008, 0x280800c0, 0x10241010,
+ 0x00000800, 0x01010000, 0x20002000, 0x00000030,
+ 0x00000800, 0x01010008, 0x20082000, 0x10000030,
+ 0x02000800, 0x01010000, 0x20002080, 0x00001030,
+ 0x02000800, 0x01010008, 0x20082080, 0x10001030,
+ 0x00004800, 0x01010000, 0x20002040, 0x00040030,
+ 0x00004800, 0x01010008, 0x20082040, 0x10040030,
+ 0x02004800, 0x01010000, 0x200020c0, 0x00041030,
+ 0x02004800, 0x01010008, 0x200820c0, 0x10041030,
+ 0x00020800, 0x01018000, 0x28002000, 0x00200030,
+ 0x00020800, 0x01018008, 0x28082000, 0x10200030,
+ 0x02020800, 0x01018000, 0x28002080, 0x00201030,
+ 0x02020800, 0x01018008, 0x28082080, 0x10201030,
+ 0x00024800, 0x01018000, 0x28002040, 0x00240030,
+ 0x00024800, 0x01018008, 0x28082040, 0x10240030,
+ 0x02024800, 0x01018000, 0x280020c0, 0x00241030,
+ 0x02024800, 0x01018008, 0x280820c0, 0x10241030,
+ 0x00000c00, 0x04010000, 0x20100000, 0x00000014,
+ 0x00000c00, 0x04010008, 0x20180000, 0x10000014,
+ 0x02000c00, 0x04010000, 0x20100080, 0x00001014,
+ 0x02000c00, 0x04010008, 0x20180080, 0x10001014,
+ 0x00004c00, 0x04010000, 0x20100040, 0x00040014,
+ 0x00004c00, 0x04010008, 0x20180040, 0x10040014,
+ 0x02004c00, 0x04010000, 0x201000c0, 0x00041014,
+ 0x02004c00, 0x04010008, 0x201800c0, 0x10041014,
+ 0x00020c00, 0x04018000, 0x28100000, 0x00200014,
+ 0x00020c00, 0x04018008, 0x28180000, 0x10200014,
+ 0x02020c00, 0x04018000, 0x28100080, 0x00201014,
+ 0x02020c00, 0x04018008, 0x28180080, 0x10201014,
+ 0x00024c00, 0x04018000, 0x28100040, 0x00240014,
+ 0x00024c00, 0x04018008, 0x28180040, 0x10240014,
+ 0x02024c00, 0x04018000, 0x281000c0, 0x00241014,
+ 0x02024c00, 0x04018008, 0x281800c0, 0x10241014,
+ 0x00000c00, 0x05010000, 0x20102000, 0x00000034,
+ 0x00000c00, 0x05010008, 0x20182000, 0x10000034,
+ 0x02000c00, 0x05010000, 0x20102080, 0x00001034,
+ 0x02000c00, 0x05010008, 0x20182080, 0x10001034,
+ 0x00004c00, 0x05010000, 0x20102040, 0x00040034,
+ 0x00004c00, 0x05010008, 0x20182040, 0x10040034,
+ 0x02004c00, 0x05010000, 0x201020c0, 0x00041034,
+ 0x02004c00, 0x05010008, 0x201820c0, 0x10041034,
+ 0x00020c00, 0x05018000, 0x28102000, 0x00200034,
+ 0x00020c00, 0x05018008, 0x28182000, 0x10200034,
+ 0x02020c00, 0x05018000, 0x28102080, 0x00201034,
+ 0x02020c00, 0x05018008, 0x28182080, 0x10201034,
+ 0x00024c00, 0x05018000, 0x28102040, 0x00240034,
+ 0x00024c00, 0x05018008, 0x28182040, 0x10240034,
+ 0x02024c00, 0x05018000, 0x281020c0, 0x00241034,
+ 0x02024c00, 0x05018008, 0x281820c0, 0x10241034
+};
+
+/* S-box lookup tables */
+
+static const u32 S1[64] = {
+ 0x01010400, 0x00000000, 0x00010000, 0x01010404,
+ 0x01010004, 0x00010404, 0x00000004, 0x00010000,
+ 0x00000400, 0x01010400, 0x01010404, 0x00000400,
+ 0x01000404, 0x01010004, 0x01000000, 0x00000004,
+ 0x00000404, 0x01000400, 0x01000400, 0x00010400,
+ 0x00010400, 0x01010000, 0x01010000, 0x01000404,
+ 0x00010004, 0x01000004, 0x01000004, 0x00010004,
+ 0x00000000, 0x00000404, 0x00010404, 0x01000000,
+ 0x00010000, 0x01010404, 0x00000004, 0x01010000,
+ 0x01010400, 0x01000000, 0x01000000, 0x00000400,
+ 0x01010004, 0x00010000, 0x00010400, 0x01000004,
+ 0x00000400, 0x00000004, 0x01000404, 0x00010404,
+ 0x01010404, 0x00010004, 0x01010000, 0x01000404,
+ 0x01000004, 0x00000404, 0x00010404, 0x01010400,
+ 0x00000404, 0x01000400, 0x01000400, 0x00000000,
+ 0x00010004, 0x00010400, 0x00000000, 0x01010004
+};
+
+static const u32 S2[64] = {
+ 0x80108020, 0x80008000, 0x00008000, 0x00108020,
+ 0x00100000, 0x00000020, 0x80100020, 0x80008020,
+ 0x80000020, 0x80108020, 0x80108000, 0x80000000,
+ 0x80008000, 0x00100000, 0x00000020, 0x80100020,
+ 0x00108000, 0x00100020, 0x80008020, 0x00000000,
+ 0x80000000, 0x00008000, 0x00108020, 0x80100000,
+ 0x00100020, 0x80000020, 0x00000000, 0x00108000,
+ 0x00008020, 0x80108000, 0x80100000, 0x00008020,
+ 0x00000000, 0x00108020, 0x80100020, 0x00100000,
+ 0x80008020, 0x80100000, 0x80108000, 0x00008000,
+ 0x80100000, 0x80008000, 0x00000020, 0x80108020,
+ 0x00108020, 0x00000020, 0x00008000, 0x80000000,
+ 0x00008020, 0x80108000, 0x00100000, 0x80000020,
+ 0x00100020, 0x80008020, 0x80000020, 0x00100020,
+ 0x00108000, 0x00000000, 0x80008000, 0x00008020,
+ 0x80000000, 0x80100020, 0x80108020, 0x00108000
+};
+
+static const u32 S3[64] = {
+ 0x00000208, 0x08020200, 0x00000000, 0x08020008,
+ 0x08000200, 0x00000000, 0x00020208, 0x08000200,
+ 0x00020008, 0x08000008, 0x08000008, 0x00020000,
+ 0x08020208, 0x00020008, 0x08020000, 0x00000208,
+ 0x08000000, 0x00000008, 0x08020200, 0x00000200,
+ 0x00020200, 0x08020000, 0x08020008, 0x00020208,
+ 0x08000208, 0x00020200, 0x00020000, 0x08000208,
+ 0x00000008, 0x08020208, 0x00000200, 0x08000000,
+ 0x08020200, 0x08000000, 0x00020008, 0x00000208,
+ 0x00020000, 0x08020200, 0x08000200, 0x00000000,
+ 0x00000200, 0x00020008, 0x08020208, 0x08000200,
+ 0x08000008, 0x00000200, 0x00000000, 0x08020008,
+ 0x08000208, 0x00020000, 0x08000000, 0x08020208,
+ 0x00000008, 0x00020208, 0x00020200, 0x08000008,
+ 0x08020000, 0x08000208, 0x00000208, 0x08020000,
+ 0x00020208, 0x00000008, 0x08020008, 0x00020200
+};
+
+static const u32 S4[64] = {
+ 0x00802001, 0x00002081, 0x00002081, 0x00000080,
+ 0x00802080, 0x00800081, 0x00800001, 0x00002001,
+ 0x00000000, 0x00802000, 0x00802000, 0x00802081,
+ 0x00000081, 0x00000000, 0x00800080, 0x00800001,
+ 0x00000001, 0x00002000, 0x00800000, 0x00802001,
+ 0x00000080, 0x00800000, 0x00002001, 0x00002080,
+ 0x00800081, 0x00000001, 0x00002080, 0x00800080,
+ 0x00002000, 0x00802080, 0x00802081, 0x00000081,
+ 0x00800080, 0x00800001, 0x00802000, 0x00802081,
+ 0x00000081, 0x00000000, 0x00000000, 0x00802000,
+ 0x00002080, 0x00800080, 0x00800081, 0x00000001,
+ 0x00802001, 0x00002081, 0x00002081, 0x00000080,
+ 0x00802081, 0x00000081, 0x00000001, 0x00002000,
+ 0x00800001, 0x00002001, 0x00802080, 0x00800081,
+ 0x00002001, 0x00002080, 0x00800000, 0x00802001,
+ 0x00000080, 0x00800000, 0x00002000, 0x00802080
+};
+
+static const u32 S5[64] = {
+ 0x00000100, 0x02080100, 0x02080000, 0x42000100,
+ 0x00080000, 0x00000100, 0x40000000, 0x02080000,
+ 0x40080100, 0x00080000, 0x02000100, 0x40080100,
+ 0x42000100, 0x42080000, 0x00080100, 0x40000000,
+ 0x02000000, 0x40080000, 0x40080000, 0x00000000,
+ 0x40000100, 0x42080100, 0x42080100, 0x02000100,
+ 0x42080000, 0x40000100, 0x00000000, 0x42000000,
+ 0x02080100, 0x02000000, 0x42000000, 0x00080100,
+ 0x00080000, 0x42000100, 0x00000100, 0x02000000,
+ 0x40000000, 0x02080000, 0x42000100, 0x40080100,
+ 0x02000100, 0x40000000, 0x42080000, 0x02080100,
+ 0x40080100, 0x00000100, 0x02000000, 0x42080000,
+ 0x42080100, 0x00080100, 0x42000000, 0x42080100,
+ 0x02080000, 0x00000000, 0x40080000, 0x42000000,
+ 0x00080100, 0x02000100, 0x40000100, 0x00080000,
+ 0x00000000, 0x40080000, 0x02080100, 0x40000100
+};
+
+static const u32 S6[64] = {
+ 0x20000010, 0x20400000, 0x00004000, 0x20404010,
+ 0x20400000, 0x00000010, 0x20404010, 0x00400000,
+ 0x20004000, 0x00404010, 0x00400000, 0x20000010,
+ 0x00400010, 0x20004000, 0x20000000, 0x00004010,
+ 0x00000000, 0x00400010, 0x20004010, 0x00004000,
+ 0x00404000, 0x20004010, 0x00000010, 0x20400010,
+ 0x20400010, 0x00000000, 0x00404010, 0x20404000,
+ 0x00004010, 0x00404000, 0x20404000, 0x20000000,
+ 0x20004000, 0x00000010, 0x20400010, 0x00404000,
+ 0x20404010, 0x00400000, 0x00004010, 0x20000010,
+ 0x00400000, 0x20004000, 0x20000000, 0x00004010,
+ 0x20000010, 0x20404010, 0x00404000, 0x20400000,
+ 0x00404010, 0x20404000, 0x00000000, 0x20400010,
+ 0x00000010, 0x00004000, 0x20400000, 0x00404010,
+ 0x00004000, 0x00400010, 0x20004010, 0x00000000,
+ 0x20404000, 0x20000000, 0x00400010, 0x20004010
+};
+
+static const u32 S7[64] = {
+ 0x00200000, 0x04200002, 0x04000802, 0x00000000,
+ 0x00000800, 0x04000802, 0x00200802, 0x04200800,
+ 0x04200802, 0x00200000, 0x00000000, 0x04000002,
+ 0x00000002, 0x04000000, 0x04200002, 0x00000802,
+ 0x04000800, 0x00200802, 0x00200002, 0x04000800,
+ 0x04000002, 0x04200000, 0x04200800, 0x00200002,
+ 0x04200000, 0x00000800, 0x00000802, 0x04200802,
+ 0x00200800, 0x00000002, 0x04000000, 0x00200800,
+ 0x04000000, 0x00200800, 0x00200000, 0x04000802,
+ 0x04000802, 0x04200002, 0x04200002, 0x00000002,
+ 0x00200002, 0x04000000, 0x04000800, 0x00200000,
+ 0x04200800, 0x00000802, 0x00200802, 0x04200800,
+ 0x00000802, 0x04000002, 0x04200802, 0x04200000,
+ 0x00200800, 0x00000000, 0x00000002, 0x04200802,
+ 0x00000000, 0x00200802, 0x04200000, 0x00000800,
+ 0x04000002, 0x04000800, 0x00000800, 0x00200002
+};
+
+static const u32 S8[64] = {
+ 0x10001040, 0x00001000, 0x00040000, 0x10041040,
+ 0x10000000, 0x10001040, 0x00000040, 0x10000000,
+ 0x00040040, 0x10040000, 0x10041040, 0x00041000,
+ 0x10041000, 0x00041040, 0x00001000, 0x00000040,
+ 0x10040000, 0x10000040, 0x10001000, 0x00001040,
+ 0x00041000, 0x00040040, 0x10040040, 0x10041000,
+ 0x00001040, 0x00000000, 0x00000000, 0x10040040,
+ 0x10000040, 0x10001000, 0x00041040, 0x00040000,
+ 0x00041040, 0x00040000, 0x10041000, 0x00001000,
+ 0x00000040, 0x10040040, 0x00001000, 0x00041040,
+ 0x10001000, 0x00000040, 0x10000040, 0x10040000,
+ 0x10040040, 0x10000000, 0x00040000, 0x10001040,
+ 0x00000000, 0x10041040, 0x00040040, 0x10000040,
+ 0x10040000, 0x10001000, 0x10001040, 0x00000000,
+ 0x10041040, 0x00041000, 0x00041000, 0x00001040,
+ 0x00001040, 0x00040040, 0x10000000, 0x10041000
+};
+
+/* Encryption components: IP, FP, and round function */
+
+#define IP(L, R, T) \
+ ROL(R, 4); \
+ T = L; \
+ L ^= R; \
+ L &= 0xf0f0f0f0; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 12); \
+ T = L; \
+ L ^= R; \
+ L &= 0xffff0000; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 14); \
+ T = L; \
+ L ^= R; \
+ L &= 0xcccccccc; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 6); \
+ T = L; \
+ L ^= R; \
+ L &= 0xff00ff00; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 7); \
+ T = L; \
+ L ^= R; \
+ L &= 0xaaaaaaaa; \
+ R ^= L; \
+ L ^= T; \
+ ROL(L, 1);
+
+#define FP(L, R, T) \
+ ROR(L, 1); \
+ T = L; \
+ L ^= R; \
+ L &= 0xaaaaaaaa; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 7); \
+ T = L; \
+ L ^= R; \
+ L &= 0xff00ff00; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 6); \
+ T = L; \
+ L ^= R; \
+ L &= 0xcccccccc; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 14); \
+ T = L; \
+ L ^= R; \
+ L &= 0xffff0000; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 12); \
+ T = L; \
+ L ^= R; \
+ L &= 0xf0f0f0f0; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 4);
+
+#define ROUND(L, R, A, B, K, d) \
+ B = K[0]; A = K[1]; K += d; \
+ B ^= R; A ^= R; \
+ B &= 0x3f3f3f3f; ROR(A, 4); \
+ L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \
+ L ^= S6[0xff & (B >> 8)]; B >>= 16; \
+ L ^= S7[0xff & A]; \
+ L ^= S5[0xff & (A >> 8)]; A >>= 16; \
+ L ^= S4[0xff & B]; \
+ L ^= S2[0xff & (B >> 8)]; \
+ L ^= S3[0xff & A]; \
+ L ^= S1[0xff & (A >> 8)];
+
+/*
+ * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved
+ * tables of 128 elements. One set is for C_i and the other for D_i, while
+ * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i.
+ *
+ * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i
+ * or D_i in bits 7-1 (bit 0 being the least significant).
+ */
+
+#define T1(x) pt[2 * (x) + 0]
+#define T2(x) pt[2 * (x) + 1]
+#define T3(x) pt[2 * (x) + 2]
+#define T4(x) pt[2 * (x) + 3]
+
+#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
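+
+/*
+ * Worked example (informational): with pt pointing at pc2[], DES_PC2(a, b,
+ * c, d) ORs together pt[2*a], pt[2*b + 1], pt[2*c + 2] and pt[2*d + 3],
+ * i.e. one entry from each of the four interleaved tables of the C_i set;
+ * after "pt += 512" the same indices select from the D_i set stored in the
+ * second half of pc2[].
+ */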
+
+/*
+ * Encryption key expansion
+ *
+ * RFC2451: Weak key checks SHOULD be performed.
+ *
+ * FIPS 74:
+ *
+ * Keys having duals are keys which produce all zeros, all ones, or
+ * alternating zero-one patterns in the C and D registers after Permuted
+ * Choice 1 has operated on the key.
+ *
+ */
+static unsigned long des_ekey(u32 *pe, const u8 *k)
+{
+ /* K&R: long is at least 32 bits */
+ unsigned long a, b, c, d, w;
+ const u32 *pt = pc2;
+
+ d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
+ c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
+ b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
+ a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
+
+ pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
+
+ /* Check if first half is weak */
+ w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
+
+ /* Skip to next table set */
+ pt += 512;
+
+ d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
+ c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
+ b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
+ a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
+
+ /* Check if second half is weak */
+ w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
+
+ pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
+
+ /* Fixup: 2413 5768 -> 1357 2468 */
+ for (d = 0; d < 16; ++d) {
+ a = pe[2 * d];
+ b = pe[2 * d + 1];
+ c = a ^ b;
+ c &= 0xffff0000;
+ a ^= c;
+ b ^= c;
+ ROL(b, 18);
+ pe[2 * d] = a;
+ pe[2 * d + 1] = b;
+ }
+
+ /* Zero if weak key */
+ return w;
+}
+
+int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return des_ekey(ctx->expkey, key) ? 0 : -ENOKEY;
+}
+EXPORT_SYMBOL_GPL(des_expand_key);
+
+/*
+ * Decryption key expansion
+ *
+ * No weak key checking is performed, as this is only used by triple DES
+ *
+ */
+static void dkey(u32 *pe, const u8 *k)
+{
+ /* K&R: long is at least 32 bits */
+ unsigned long a, b, c, d;
+ const u32 *pt = pc2;
+
+ d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
+ c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
+ b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
+ a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
+
+ pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2] = DES_PC2(b, c, d, a);
+
+ /* Skip to next table set */
+ pt += 512;
+
+ d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
+ c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
+ b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
+ a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
+
+ pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
+
+ /* Fixup: 2413 5768 -> 1357 2468 */
+ for (d = 0; d < 16; ++d) {
+ a = pe[2 * d];
+ b = pe[2 * d + 1];
+ c = a ^ b;
+ c &= 0xffff0000;
+ a ^= c;
+ b ^= c;
+ ROL(b, 18);
+ pe[2 * d] = a;
+ pe[2 * d + 1] = b;
+ }
+}
+
+void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = ctx->expkey;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des_encrypt);
+
+void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des_decrypt);
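
With a context expanded as above, a single-block round trip is just (sketch, buffer contents illustrative):

    u8 pt[DES_BLOCK_SIZE] = "ABCDEFG";      /* 7 characters plus NUL = 8 bytes */
    u8 ct[DES_BLOCK_SIZE], out[DES_BLOCK_SIZE];

    des_encrypt(&ctx, ct, pt);
    des_decrypt(&ctx, out, ct);             /* out now matches pt */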
+
+int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *pe = ctx->expkey;
+ int err;
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ err = des3_ede_verify_key(key, keylen, true);
+ if (err && err != -ENOKEY)
+ return err;
+
+ des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+ dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+ des_ekey(pe, key);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(des3_ede_expand_key);
+
+void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = dctx->expkey;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(R, L, A, B, K, 2);
+ ROUND(L, R, A, B, K, 2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des3_ede_encrypt);
+
+void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(R, L, A, B, K, -2);
+ ROUND(L, R, A, B, K, -2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des3_ede_decrypt);
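
The triple-DES interface follows the same pattern; note that des3_ede_expand_key() above still expands the key when it returns -ENOKEY, leaving the policy decision to the caller. A sketch with placeholder key material:

    struct des3_ede_ctx ctx3;
    u8 key3[DES3_EDE_KEY_SIZE];             /* 24 bytes, placeholder material */
    u8 blk[DES3_EDE_BLOCK_SIZE] = {}, enc[DES3_EDE_BLOCK_SIZE];
    int err;

    get_random_bytes(key3, sizeof(key3));
    err = des3_ede_expand_key(&ctx3, key3, sizeof(key3));
    if (err == -EINVAL)
            return err;                     /* wrong key length */
    /* err may be -ENOKEY: key expanded, but flagged by des3_ede_verify_key() */
    des3_ede_encrypt(&ctx3, enc, blk);
    des3_ede_decrypt(&ctx3, blk, enc);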
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
new file mode 100644
index 000000000000..8f8c45e0cdcf
--- /dev/null
+++ b/lib/crypto/gf128mul.c
@@ -0,0 +1,436 @@
+/* gf128mul.c - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue 31/01/2006
+
+ This file provides fast multiplication in GF(2^128) as required by several
+ cryptographic authentication modes
+*/
+
+#include <crypto/gf128mul.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define gf128mul_dat(q) { \
+ q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
+ q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
+ q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
+ q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
+ q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
+ q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
+ q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
+ q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
+ q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
+ q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
+ q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
+ q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
+ q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
+ q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
+ q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
+ q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
+ q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
+ q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
+ q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
+ q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
+ q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
+ q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
+ q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
+ q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
+ q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
+ q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
+ q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
+ q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
+ q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
+ q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
+ q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
+ q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
+}
+
+/*
+ * Given a value i in 0..255 as the byte overflow when a field element
+ * in GF(2^128) is multiplied by x^8, the following macro returns the
+ * 16-bit value that must be XOR-ed into the low-degree end of the
+ * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
+ *
+ * There are two versions of the macro, and hence two tables: one for
+ * the "be" convention where the highest-order bit is the coefficient of
+ * the highest-degree polynomial term, and one for the "le" convention
+ * where the highest-order bit is the coefficient of the lowest-degree
+ * polynomial term. In both cases the values are stored in CPU byte
+ * endianness such that the coefficients are ordered consistently across
+ * bytes, i.e. in the "be" table bits 15..0 of the stored value
+ * correspond to the coefficients of x^15..x^0, and in the "le" table
+ * bits 15..0 correspond to the coefficients of x^0..x^15.
+ *
+ * Therefore, provided that the appropriate byte endianness conversions
+ * are done by the multiplication functions (and these must be in place
+ * anyway to support both little endian and big endian CPUs), the "be"
+ * table can be used for multiplications of both "bbe" and "ble"
+ * elements, and the "le" table can be used for multiplications of both
+ * "lle" and "lbe" elements.
+ */
+
+#define xda_be(i) ( \
+ (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
+ (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
+ (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
+ (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
+)
+
+#define xda_le(i) ( \
+ (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
+ (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
+ (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
+ (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
+)
+
+static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
+static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
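
A worked instance of the reduction described above (an editorial check, not text from the patch): with x^128 = x^7 + x^2 + x + 1 in this field, the single-bit overflow cases evaluate to

    xda_be(0x01) == 0x0087      /* x^128 -> x^7 + x^2 + x + 1, "be" bit order   */
    xda_be(0x80) == 0x4380      /* x^135 -> x^7 * (x^7 + x^2 + x + 1)           */
    xda_le(0x80) == 0xe100      /* x^128 again, in the reflected "le" bit order */

and every other table entry is simply the XOR of the per-bit constants selected by i.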
+
+/*
+ * The following functions multiply a field element by x^8 in
+ * the polynomial field representation. They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ */
+
+static void gf128mul_x8_lle(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_le[b & 0xff];
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+/* time invariant version of gf128mul_x8_lle */
+static void gf128mul_x8_lle_ti(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = xda_le(b & 0xff); /* avoid table lookup */
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+static void gf128mul_x8_bbe(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ x->a = cpu_to_be64((a << 8) | (b >> 56));
+ x->b = cpu_to_be64((b << 8) ^ _tt);
+}
+
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ r->a = cpu_to_le64((a << 8) | (b >> 56));
+ r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
+void gf128mul_lle(be128 *r, const be128 *b)
+{
+ /*
+ * The p array should be aligned to twice the size of its element type,
+ * so that every even/odd pair is guaranteed to share a cacheline
+ * (assuming a cacheline size of 32 bytes or more, which is by far the
+ * most common). This ensures that each be128_xor() call in the loop
+ * takes the same amount of time regardless of the value of 'ch', which
+ * is derived from function parameter 'b', which is commonly used as a
+ * key, e.g., for GHASH. The odd array elements are all set to zero,
+ * making each be128_xor() a NOP if its associated bit in 'ch' is not
+ * set, and this is equivalent to calling be128_xor() conditionally.
+ * This approach aims to avoid leaking information about such keys
+ * through execution time variances.
+ *
+ * Unfortunately, __aligned(16) or higher does not work on x86 for
+ * variables on the stack so we need to perform the alignment by hand.
+ */
+ be128 array[16 + 3] = {};
+ be128 *p = PTR_ALIGN(&array[0], 2 * sizeof(be128));
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_lle(&p[2 * i + 2], &p[2 * i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[15 - i];
+
+ be128_xor(r, r, &p[ 0 + !(ch & 0x80)]);
+ be128_xor(r, r, &p[ 2 + !(ch & 0x40)]);
+ be128_xor(r, r, &p[ 4 + !(ch & 0x20)]);
+ be128_xor(r, r, &p[ 6 + !(ch & 0x10)]);
+ be128_xor(r, r, &p[ 8 + !(ch & 0x08)]);
+ be128_xor(r, r, &p[10 + !(ch & 0x04)]);
+ be128_xor(r, r, &p[12 + !(ch & 0x02)]);
+ be128_xor(r, r, &p[14 + !(ch & 0x01)]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_lle_ti(r); /* use the time invariant version */
+ }
+}
+EXPORT_SYMBOL(gf128mul_lle);
+
+void gf128mul_bbe(be128 *r, const be128 *b)
+{
+ be128 p[8];
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[i];
+
+ if (ch & 0x80)
+ be128_xor(r, r, &p[7]);
+ if (ch & 0x40)
+ be128_xor(r, r, &p[6]);
+ if (ch & 0x20)
+ be128_xor(r, r, &p[5]);
+ if (ch & 0x10)
+ be128_xor(r, r, &p[4]);
+ if (ch & 0x08)
+ be128_xor(r, r, &p[3]);
+ if (ch & 0x04)
+ be128_xor(r, r, &p[2]);
+ if (ch & 0x02)
+ be128_xor(r, r, &p[1]);
+ if (ch & 0x01)
+ be128_xor(r, r, &p[0]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_bbe(r);
+ }
+}
+EXPORT_SYMBOL(gf128mul_bbe);
+
+/* This version uses 64k bytes of table space.
+ A 16 byte buffer has to be multiplied by a 16 byte key
+ value in GF(2^128). If we consider a GF(2^128) value in
+ the buffer's lowest byte, we can construct a table of
+ the 256 16 byte values that result from the 256 values
+ of this byte. This requires 4096 bytes. But we also
+ need tables for each of the 16 higher bytes in the
+ buffer as well, which makes 64 kbytes in total.
+*/
+/* additional explanation
+ * t[0][BYTE] contains g*BYTE
+ * t[1][BYTE] contains g*x^8*BYTE
+ * ..
+ * t[15][BYTE] contains g*x^120*BYTE */
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
+{
+ struct gf128mul_64k *t;
+ int i, j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ for (i = 0; i < 16; i++) {
+ t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+ if (!t->t[i]) {
+ gf128mul_free_64k(t);
+ t = NULL;
+ goto out;
+ }
+ }
+
+ t->t[0]->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
+
+ for (i = 0;;) {
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[i]->t[j + k],
+ &t->t[i]->t[j], &t->t[i]->t[k]);
+
+ if (++i >= 16)
+ break;
+
+ for (j = 128; j > 0; j >>= 1) {
+ t->t[i]->t[j] = t->t[i - 1]->t[j];
+ gf128mul_x8_bbe(&t->t[i]->t[j]);
+ }
+ }
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_bbe);
+
+void gf128mul_free_64k(struct gf128mul_64k *t)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ kfree_sensitive(t->t[i]);
+ kfree_sensitive(t);
+}
+EXPORT_SYMBOL(gf128mul_free_64k);
+
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i;
+
+ *r = t->t[0]->t[ap[15]];
+ for (i = 1; i < 16; ++i)
+ be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_bbe);
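
A minimal usage sketch of the 64k variant (variable names are illustrative): the tables are derived once from the fixed multiplier and then applied in place, block by block.

    struct gf128mul_64k *t;
    be128 h = {};                           /* fixed multiplier, e.g. a hash key */
    be128 x = {};                           /* block to be multiplied in place */

    t = gf128mul_init_64k_bbe(&h);
    if (!t)
            return -ENOMEM;
    gf128mul_64k_bbe(&x, t);                /* x = x * h in GF(2^128), "bbe" convention */
    gf128mul_free_64k(t);                   /* kfree_sensitive() scrubs the key-derived tables */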
+
+/* This version uses 4k bytes of table space.
+ A 16 byte buffer has to be multiplied by a 16 byte key
+ value in GF(2^128). If we consider a GF(2^128) value in a
+ single byte, we can construct a table of the 256 16 byte
+ values that result from the 256 values of this byte.
+ This requires 4096 bytes. If we take the highest byte in
+ the buffer and use this table to get the result, we then
+ have to multiply by x^120 to get the final value. For the
+ next highest byte the result has to be multiplied by x^112
+ and so on. But we can do this by accumulating the result
+ in an accumulator starting with the result for the top
+ byte. We repeatedly multiply the accumulator value by
+ x^8 and then add in (i.e. xor) the 16 bytes of the next
+ lower byte in the buffer, stopping when we reach the
+ lowest byte. This requires a 4096 byte table.
+*/
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[128] = *g;
+ for (j = 64; j > 0; j >>= 1)
+ gf128mul_x_lle(&t->t[j], &t->t[j+j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_lle);
+
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_bbe);
+
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 15;
+
+ *r = t->t[ap[15]];
+ while (i--) {
+ gf128mul_x8_lle(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_lle);
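
The loop above is the accumulate-and-shift procedure from the comment: start from the table entry for the highest byte, then repeatedly multiply by x^8 and fold in the next lower byte. Usage mirrors the 64k variant (sketch):

    struct gf128mul_4k *t4 = gf128mul_init_4k_lle(&h);

    if (!t4)
            return -ENOMEM;
    gf128mul_4k_lle(&x, t4);                /* x = x * h, "lle" convention */
    kfree_sensitive(t4);                    /* single kzalloc'ed table */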
+
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 0;
+
+ *r = t->t[ap[0]];
+ while (++i < 16) {
+ gf128mul_x8_bbe(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_bbe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c
new file mode 100644
index 000000000000..dabc3accae05
--- /dev/null
+++ b/lib/crypto/libchacha.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The ChaCha stream cipher (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <crypto/algapi.h> // for crypto_xor_cpy
+#include <crypto/chacha.h>
+
+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
+
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
+ bytes -= CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
+ }
+ if (bytes) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, bytes);
+ }
+}
+EXPORT_SYMBOL(chacha_crypt_generic);
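
A caller-side sketch, assuming the chacha_init() helper and the CHACHA_* constants from <crypto/chacha.h>; dst, src and len are placeholders:

    u32 state[16];                          /* constants, key, counter, nonce */
    u32 key[CHACHA_KEY_SIZE / sizeof(u32)]; /* 8 key words, in the order chacha_init() expects */
    u8 iv[CHACHA_IV_SIZE] = {};             /* 32-bit block counter followed by the nonce */

    chacha_init(state, key, iv);
    chacha_crypt_generic(state, dst, src, len, 20); /* 20 rounds = ChaCha20 */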
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/memneq.c b/lib/crypto/memneq.c
new file mode 100644
index 000000000000..243d8677cc51
--- /dev/null
+++ b/lib/crypto/memneq.c
@@ -0,0 +1,173 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ * James Yonan <james@openvpn.net>
+ * Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of OpenVPN Technologies nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+ unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ while (size >= sizeof(unsigned long)) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ a += sizeof(unsigned long);
+ b += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+ }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ while (size > 0) {
+ neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+ OPTIMIZER_HIDE_VAR(neq);
+ a += 1;
+ b += 1;
+ size -= 1;
+ }
+ return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+ unsigned long neq = 0;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (sizeof(unsigned long) == 8) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned long *)(a + 8)) ^
+ get_unaligned((unsigned long *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else if (sizeof(unsigned int) == 4) {
+ neq |= get_unaligned((unsigned int *)a) ^
+ get_unaligned((unsigned int *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 4)) ^
+ get_unaligned((unsigned int *)(b + 4));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 8)) ^
+ get_unaligned((unsigned int *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 12)) ^
+ get_unaligned((unsigned int *)(b + 12));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ {
+ neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+ OPTIMIZER_HIDE_VAR(neq);
+ }
+
+ return neq;
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes. Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+ size_t size)
+{
+ switch (size) {
+ case 16:
+ return __crypto_memneq_16(a, b);
+ default:
+ return __crypto_memneq_generic(a, b, size);
+ }
+}
+EXPORT_SYMBOL(__crypto_memneq);
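
As the comment says, callers go through the crypto_memneq() wrapper; a typical authentication-tag check looks like this (sketch):

    /* Constant-time comparison: no early exit on the first differing byte. */
    if (crypto_memneq(computed_tag, received_tag, 16))
            return -EBADMSG;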
diff --git a/lib/mpi/Makefile b/lib/crypto/mpi/Makefile
index d5874a7f5ff9..6e6ef9a34fe1 100644
--- a/lib/mpi/Makefile
+++ b/lib/crypto/mpi/Makefile
@@ -13,9 +13,16 @@ mpi-y = \
generic_mpih-rshift.o \
generic_mpih-sub1.o \
generic_mpih-add1.o \
+ ec.o \
mpicoder.o \
+ mpi-add.o \
mpi-bit.o \
mpi-cmp.o \
+ mpi-sub-ui.o \
+ mpi-div.o \
+ mpi-inv.o \
+ mpi-mod.o \
+ mpi-mul.o \
mpih-cmp.o \
mpih-div.o \
mpih-mul.o \
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
new file mode 100644
index 000000000000..e16dca1e23d5
--- /dev/null
+++ b/lib/crypto/mpi/ec.c
@@ -0,0 +1,1509 @@
+/* ec.c - Elliptic Curve functions
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ * Copyright (C) 2013 g10 Code GmbH
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define point_init(a) mpi_point_init((a))
+#define point_free(a) mpi_point_free_parts((a))
+
+#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+
+#define DIM(v) (sizeof(v)/sizeof((v)[0]))
+
+
+/* Create a new point object. NBITS gives the size in bits of one
+ * coordinate; it is only used to pre-allocate some resources and
+ * might also be passed as 0 to use a default value.
+ */
+MPI_POINT mpi_point_new(unsigned int nbits)
+{
+ MPI_POINT p;
+
+ (void)nbits; /* Currently not used. */
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p)
+ mpi_point_init(p);
+ return p;
+}
+EXPORT_SYMBOL_GPL(mpi_point_new);
+
+/* Release the point object P. P may be NULL. */
+void mpi_point_release(MPI_POINT p)
+{
+ if (p) {
+ mpi_point_free_parts(p);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_point_release);
+
+/* Initialize the fields of a point object. gcry_mpi_point_free_parts
+ * may be used to release the fields.
+ */
+void mpi_point_init(MPI_POINT p)
+{
+ p->x = mpi_new(0);
+ p->y = mpi_new(0);
+ p->z = mpi_new(0);
+}
+EXPORT_SYMBOL_GPL(mpi_point_init);
+
+/* Release the parts of a point object. */
+void mpi_point_free_parts(MPI_POINT p)
+{
+ mpi_free(p->x); p->x = NULL;
+ mpi_free(p->y); p->y = NULL;
+ mpi_free(p->z); p->z = NULL;
+}
+EXPORT_SYMBOL_GPL(mpi_point_free_parts);
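
A short sketch of the two ownership models above (heap-allocated point vs. caller-owned struct; NULL check omitted):

    MPI_POINT p = mpi_point_new(0);         /* allocates the struct and its x/y/z */
    struct gcry_mpi_point q;

    mpi_point_init(&q);                     /* caller owns the struct itself */
    /* ... use p and q ... */
    mpi_point_free_parts(&q);               /* frees q's coordinates only */
    mpi_point_release(p);                   /* frees p's coordinates and p */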
+
+/* Set the value from S into D. */
+static void point_set(MPI_POINT d, MPI_POINT s)
+{
+ mpi_set(d->x, s->x);
+ mpi_set(d->y, s->y);
+ mpi_set(d->z, s->z);
+}
+
+static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx)
+{
+ size_t nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(p->x, nlimbs);
+ p->x->nlimbs = nlimbs;
+ mpi_resize(p->z, nlimbs);
+ p->z->nlimbs = nlimbs;
+
+ if (ctx->model != MPI_EC_MONTGOMERY) {
+ mpi_resize(p->y, nlimbs);
+ p->y->nlimbs = nlimbs;
+ }
+}
+
+static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_swap_cond(d->x, s->x, swap);
+ if (ctx->model != MPI_EC_MONTGOMERY)
+ mpi_swap_cond(d->y, s->y, swap);
+ mpi_swap_cond(d->z, s->z, swap);
+}
+
+
+/* W = W mod P. */
+static void ec_mod(MPI w, struct mpi_ec_ctx *ec)
+{
+ if (ec->t.p_barrett)
+ mpi_mod_barrett(w, w, ec->t.p_barrett);
+ else
+ mpi_mod(w, w, ec->p);
+}
+
+static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_add(w, u, v);
+ ec_mod(w, ctx);
+}
+
+static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec)
+{
+ mpi_sub(w, u, v);
+ while (w->sign)
+ mpi_add(w, w, ec->p);
+ /*ec_mod(w, ec);*/
+}
+
+static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_mul(w, u, v);
+ ec_mod(w, ctx);
+}
+
+/* W = 2 * U mod P. */
+static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ mpi_lshift(w, u, 1);
+ ec_mod(w, ctx);
+}
+
+static void ec_powm(MPI w, const MPI b, const MPI e,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, e, ctx->p);
+ /* mpi_abs(w); */
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_TWO), ctx);
+ * for easier optimization.
+ */
+static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ /* Using mpi_mul is slightly faster (at least on amd64). */
+ /* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */
+ ec_mulm(w, b, b, ctx);
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_THREE), ctx);
+ * for easier optimization.
+ */
+static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p);
+}
+
+static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_invm(x, a, ctx->p))
+ log_error("ec_invm: inverse does not exist:\n");
+}
+
+static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up,
+ mpi_size_t usize, unsigned long set)
+{
+ mpi_size_t i;
+ mpi_limb_t mask = ((mpi_limb_t)0) - set;
+ mpi_limb_t x;
+
+ for (i = 0; i < usize; i++) {
+ x = mask & (wp[i] ^ up[i]);
+ wp[i] = wp[i] ^ x;
+ }
+}
+
+/* Routines for 2^255 - 19. */
+
+#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+
+static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_add_n(wp, up, vp, wsize);
+ borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519*2];
+ mpi_limb_t m[LIMB_SIZE_25519+1];
+ mpi_limb_t cy;
+ int msb;
+
+ (void)ctx;
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_25519: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+ memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB);
+ mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(n, m, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4);
+ m[LIMB_SIZE_25519] = cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ cy = mpihelp_add_n(wp, wp, m, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ memset(m, 0, wsize * BYTES_PER_MPI_LIMB);
+ msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB));
+ m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19;
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+ mpihelp_add_n(wp, wp, m, wsize);
+
+ m[0] = 0;
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, m, wsize);
+}
+
+static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_25519(w, u, u, ctx);
+}
+
+static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_25519(w, b, b, ctx);
+}
+
+/* Routines for 2^448 - 2^224 - 1. */
+
+#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2)
+
+static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t cy;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ cy = mpihelp_add_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_sub_n(wp, wp, n, wsize);
+}
+
+static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
+
+static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448*2];
+ mpi_limb_t a2[LIMB_SIZE_HALF_448];
+ mpi_limb_t a3[LIMB_SIZE_HALF_448];
+ mpi_limb_t b0[LIMB_SIZE_HALF_448];
+ mpi_limb_t b1[LIMB_SIZE_HALF_448];
+ mpi_limb_t cy;
+ int i;
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ mpi_limb_t b1_rest, a3_rest;
+#endif
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_448: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+
+ for (i = 0; i < (wsize + 1) / 2; i++) {
+ b0[i] = n[i];
+ b1[i] = n[i+wsize/2];
+ a2[i] = n[i+wsize];
+ a3[i] = n[i+wsize+wsize/2];
+ }
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+ a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+
+ b1_rest = 0;
+ a3_rest = 0;
+
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v, a3v;
+ b1v = b1[i];
+ a3v = a3[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ a3[i] = (a3_rest << 32) | (a3v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1);
+ }
+#endif
+
+ cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448);
+ for (i = 0; i < (wsize + 1) / 2; i++)
+ wp[i] = b0[i];
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1);
+#endif
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b0[LIMB_SIZE_HALF_448-1] >> 32;
+#endif
+
+ cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy);
+ cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b1_rest = 0;
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v = b1[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ }
+ wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32);
+#endif
+ for (i = 0; i < wsize / 2; i++)
+ wp[i+(wsize + 1) / 2] = b1[i];
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b1[LIMB_SIZE_HALF_448-1];
+#endif
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ n[LIMB_SIZE_HALF_448-1] = cy << 32;
+#else
+ n[LIMB_SIZE_HALF_448] = cy;
+#endif
+ n[0] = cy;
+ mpihelp_add_n(wp, wp, n, wsize);
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
+
+static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_448(w, u, u, ctx);
+}
+
+static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_448(w, b, b, ctx);
+}
+
+struct field_table {
+ const char *p;
+
+ /* computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+};
+
+static const struct field_table field_table[] = {
+ {
+ "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
+ ec_addm_25519,
+ ec_subm_25519,
+ ec_mulm_25519,
+ ec_mul2_25519,
+ ec_pow2_25519
+ },
+ {
+ "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE"
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ ec_addm_448,
+ ec_subm_448,
+ ec_mulm_448,
+ ec_mul2_448,
+ ec_pow2_448
+ },
+ { NULL, NULL, NULL, NULL, NULL, NULL },
+};
+
+/* Force recomputation of all helper variables. */
+static void mpi_ec_get_reset(struct mpi_ec_ctx *ec)
+{
+ ec->t.valid.a_is_pminus3 = 0;
+ ec->t.valid.two_inv_p = 0;
+}
+
+/* Accessor for helper variable. */
+static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec)
+{
+ MPI tmp;
+
+ if (!ec->t.valid.a_is_pminus3) {
+ ec->t.valid.a_is_pminus3 = 1;
+ tmp = mpi_alloc_like(ec->p);
+ mpi_sub_ui(tmp, ec->p, 3);
+ ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp);
+ mpi_free(tmp);
+ }
+
+ return ec->t.a_is_pminus3;
+}
+
+/* Accessor for helper variable. */
+static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec)
+{
+ if (!ec->t.valid.two_inv_p) {
+ ec->t.valid.two_inv_p = 1;
+ if (!ec->t.two_inv_p)
+ ec->t.two_inv_p = mpi_alloc(0);
+ ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec);
+ }
+ return ec->t.two_inv_p;
+}
+
+static const char *const curve25519_bad_points[] = {
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
+ "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ "0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0",
+ "0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
+ NULL
+};
+
+static const char *const curve448_bad_points[] = {
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000001",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ "00000000000000000000000000000000000000000000000000000000",
+ NULL
+};
+
+static const char *const *bad_points_table[] = {
+ curve25519_bad_points,
+ curve448_bad_points,
+};
+
+static void mpi_ec_coefficient_normalize(MPI a, MPI p)
+{
+ if (a->sign) {
+ mpi_resize(a, p->nlimbs);
+ mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs);
+ a->nlimbs = p->nlimbs;
+ a->sign = 0;
+ }
+}
+
+/* This function initializes a context for an elliptic curve based on the
+ * field GF(p). P is the prime specifying this field, A is the first
+ * coefficient. CTX is expected to be zeroized.
+ */
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b)
+{
+ int i;
+ static int use_barrett = -1 /* TODO: 1 or -1 */;
+
+ mpi_ec_coefficient_normalize(a, p);
+ mpi_ec_coefficient_normalize(b, p);
+
+ /* Fixme: Do we want to check some constraints? e.g. a < p */
+
+ ctx->model = model;
+ ctx->dialect = dialect;
+ ctx->flags = flags;
+ if (dialect == ECC_DIALECT_ED25519)
+ ctx->nbits = 256;
+ else
+ ctx->nbits = mpi_get_nbits(p);
+ ctx->p = mpi_copy(p);
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
+ ctx->d = NULL;
+ ctx->t.two_inv_p = NULL;
+
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+
+ if (model == MPI_EC_MONTGOMERY) {
+ for (i = 0; i < DIM(bad_points_table); i++) {
+ MPI p_candidate = mpi_scanval(bad_points_table[i][0]);
+ int match_p = !mpi_cmp(ctx->p, p_candidate);
+ int j;
+
+ mpi_free(p_candidate);
+ if (!match_p)
+ continue;
+
+ for (j = 0; j < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++)
+ ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]);
+ }
+ } else {
+ /* Allocate scratch variables. */
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ ctx->t.scratch[i] = mpi_alloc_like(ctx->p);
+ }
+
+ ctx->addm = ec_addm;
+ ctx->subm = ec_subm;
+ ctx->mulm = ec_mulm;
+ ctx->mul2 = ec_mul2;
+ ctx->pow2 = ec_pow2;
+
+ for (i = 0; field_table[i].p; i++) {
+ MPI f_p;
+
+ f_p = mpi_scanval(field_table[i].p);
+ if (!f_p)
+ break;
+
+ if (!mpi_cmp(p, f_p)) {
+ ctx->addm = field_table[i].addm;
+ ctx->subm = field_table[i].subm;
+ ctx->mulm = field_table[i].mulm;
+ ctx->mul2 = field_table[i].mul2;
+ ctx->pow2 = field_table[i].pow2;
+ mpi_free(f_p);
+
+ mpi_resize(ctx->a, ctx->p->nlimbs);
+ ctx->a->nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(ctx->b, ctx->p->nlimbs);
+ ctx->b->nlimbs = ctx->p->nlimbs;
+
+ for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++)
+ ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs;
+
+ break;
+ }
+
+ mpi_free(f_p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_init);
+
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx)
+{
+ int i;
+
+ mpi_barrett_free(ctx->t.p_barrett);
+
+ /* Domain parameter. */
+ mpi_free(ctx->p);
+ mpi_free(ctx->a);
+ mpi_free(ctx->b);
+ mpi_point_release(ctx->G);
+ mpi_free(ctx->n);
+
+ /* The key. */
+ mpi_point_release(ctx->Q);
+ mpi_free(ctx->d);
+
+ /* Private data of ec.c. */
+ mpi_free(ctx->t.two_inv_p);
+
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ mpi_free(ctx->t.scratch[i]);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_deinit);
+
+/* Compute the affine coordinates from the projective coordinates in
+ * POINT. Set them into X and Y. If one coordinate is not required,
+ * X or Y may be passed as NULL. CTX is the usual context. Returns: 0
+ * on success or !0 if POINT is at infinity.
+ */
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_cmp_ui(point->z, 0))
+ return -1;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
+ {
+ MPI z1, z2, z3;
+
+ z1 = mpi_new(0);
+ z2 = mpi_new(0);
+ ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */
+ ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
+
+ if (x)
+ ec_mulm(x, point->x, z2, ctx);
+
+ if (y) {
+ z3 = mpi_new(0);
+ ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
+ ec_mulm(y, point->y, z3, ctx);
+ mpi_free(z3);
+ }
+
+ mpi_free(z2);
+ mpi_free(z1);
+ }
+ return 0;
+
+ case MPI_EC_MONTGOMERY:
+ {
+ if (x)
+ mpi_set(x, point->x);
+
+ if (y) {
+ log_fatal("%s: Getting Y-coordinate on %s is not supported\n",
+ "mpi_ec_get_affine", "Montgomery");
+ return -1;
+ }
+ }
+ return 0;
+
+ case MPI_EC_EDWARDS:
+ {
+ MPI z;
+
+ z = mpi_new(0);
+ ec_invm(z, point->z, ctx);
+
+ mpi_resize(z, ctx->p->nlimbs);
+ z->nlimbs = ctx->p->nlimbs;
+
+ if (x) {
+ mpi_resize(x, ctx->p->nlimbs);
+ x->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(x, point->x, z, ctx);
+ }
+ if (y) {
+ mpi_resize(y, ctx->p->nlimbs);
+ y->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(y, point->y, z, ctx);
+ }
+
+ mpi_free(z);
+ }
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_get_affine);
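
A sketch of the usual call sequence around this helper, using mpi_ec_mul_point() defined further down in this file; ctx, scalar and generator are assumed to exist, and error handling is elided:

    MPI_POINT r = mpi_point_new(0);
    MPI x = mpi_new(0);

    mpi_ec_mul_point(r, scalar, generator, ctx);    /* r = scalar * generator */
    if (mpi_ec_get_affine(x, NULL, r, ctx))
            pr_err("result is the point at infinity\n");
    mpi_free(x);
    mpi_point_release(r);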
+
+/* RESULT = 2 * POINT (Weierstrass version). */
+static void dup_point_weierstrass(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define t1 (ctx->t.scratch[0])
+#define t2 (ctx->t.scratch[1])
+#define t3 (ctx->t.scratch[2])
+#define l1 (ctx->t.scratch[3])
+#define l2 (ctx->t.scratch[4])
+#define l3 (ctx->t.scratch[5])
+
+ if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) {
+ /* P_y == 0 || P_z == 0 => [1:1:0] */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ } else {
+ if (ec_get_a_is_pminus3(ctx)) {
+ /* Use the faster case. */
+ /* L1 = 3(X - Z^2)(X + Z^2) */
+ /* T1: used for Z^2. */
+ /* T2: used for the right term. */
+ ec_pow2(t1, point->z, ctx);
+ ec_subm(l1, point->x, t1, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_addm(t2, point->x, t1, ctx);
+ ec_mulm(l1, l1, t2, ctx);
+ } else {
+ /* Standard case. */
+ /* L1 = 3X^2 + aZ^4 */
+ /* T1: used for aZ^4. */
+ ec_pow2(l1, point->x, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx);
+ ec_mulm(t1, t1, ctx->a, ctx);
+ ec_addm(l1, l1, t1, ctx);
+ }
+ /* Z3 = 2YZ */
+ ec_mulm(z3, point->y, point->z, ctx);
+ ec_mul2(z3, z3, ctx);
+
+ /* L2 = 4XY^2 */
+ /* T2: used for Y2; required later. */
+ ec_pow2(t2, point->y, ctx);
+ ec_mulm(l2, t2, point->x, ctx);
+ ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx);
+
+ /* X3 = L1^2 - 2L2 */
+ /* T1: used for L2^2. */
+ ec_pow2(x3, l1, ctx);
+ ec_mul2(t1, l2, ctx);
+ ec_subm(x3, x3, t1, ctx);
+
+ /* L3 = 8Y^4 */
+ /* T2: taken from above. */
+ ec_pow2(t2, t2, ctx);
+ ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx);
+
+ /* Y3 = L1(L2 - X3) - L3 */
+ ec_subm(y3, l2, x3, ctx);
+ ec_mulm(y3, y3, l1, ctx);
+ ec_subm(y3, y3, l3, ctx);
+ }
+
+#undef x3
+#undef y3
+#undef z3
+#undef t1
+#undef t2
+#undef t3
+#undef l1
+#undef l2
+#undef l3
+}
+
+/* RESULT = 2 * POINT (Montgomery version). */
+static void dup_point_montgomery(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)point;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_dup_point", "Montgomery");
+}
+
+/* RESULT = 2 * POINT (Twisted Edwards version). */
+static void dup_point_edwards(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define X1 (point->x)
+#define Y1 (point->y)
+#define Z1 (point->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define B (ctx->t.scratch[0])
+#define C (ctx->t.scratch[1])
+#define D (ctx->t.scratch[2])
+#define E (ctx->t.scratch[3])
+#define F (ctx->t.scratch[4])
+#define H (ctx->t.scratch[5])
+#define J (ctx->t.scratch[6])
+
+ /* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */
+
+ /* B = (X_1 + Y_1)^2 */
+ ctx->addm(B, X1, Y1, ctx);
+ ctx->pow2(B, B, ctx);
+
+ /* C = X_1^2 */
+ /* D = Y_1^2 */
+ ctx->pow2(C, X1, ctx);
+ ctx->pow2(D, Y1, ctx);
+
+ /* E = aC */
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(E, ctx->p, C, ctx);
+ else
+ ctx->mulm(E, ctx->a, C, ctx);
+
+ /* F = E + D */
+ ctx->addm(F, E, D, ctx);
+
+ /* H = Z_1^2 */
+ ctx->pow2(H, Z1, ctx);
+
+ /* J = F - 2H */
+ ctx->mul2(J, H, ctx);
+ ctx->subm(J, F, J, ctx);
+
+ /* X_3 = (B - C - D) · J */
+ ctx->subm(X3, B, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, J, ctx);
+
+ /* Y_3 = F · (E - D) */
+ ctx->subm(Y3, E, D, ctx);
+ ctx->mulm(Y3, Y3, F, ctx);
+
+ /* Z_3 = F · J */
+ ctx->mulm(Z3, F, J, ctx);
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X3
+#undef Y3
+#undef Z3
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef H
+#undef J
+}
+
+/* RESULT = 2 * POINT */
+static void
+mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ dup_point_weierstrass(result, point, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ dup_point_montgomery(result, point, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ dup_point_edwards(result, point, ctx);
+ break;
+ }
+}
+
+/* RESULT = P1 + P2 (Weierstrass version).*/
+static void add_points_weierstrass(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define x1 (p1->x)
+#define y1 (p1->y)
+#define z1 (p1->z)
+#define x2 (p2->x)
+#define y2 (p2->y)
+#define z2 (p2->z)
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define l1 (ctx->t.scratch[0])
+#define l2 (ctx->t.scratch[1])
+#define l3 (ctx->t.scratch[2])
+#define l4 (ctx->t.scratch[3])
+#define l5 (ctx->t.scratch[4])
+#define l6 (ctx->t.scratch[5])
+#define l7 (ctx->t.scratch[6])
+#define l8 (ctx->t.scratch[7])
+#define l9 (ctx->t.scratch[8])
+#define t1 (ctx->t.scratch[9])
+#define t2 (ctx->t.scratch[10])
+
+ if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) {
+ /* Same point; need to call the duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else if (!mpi_cmp_ui(z1, 0)) {
+ /* P1 is at infinity. */
+ mpi_set(x3, p2->x);
+ mpi_set(y3, p2->y);
+ mpi_set(z3, p2->z);
+ } else if (!mpi_cmp_ui(z2, 0)) {
+ /* P2 is at infinity. */
+ mpi_set(x3, p1->x);
+ mpi_set(y3, p1->y);
+ mpi_set(z3, p1->z);
+ } else {
+ int z1_is_one = !mpi_cmp_ui(z1, 1);
+ int z2_is_one = !mpi_cmp_ui(z2, 1);
+
+ /* l1 = x1 z2^2 */
+ /* l2 = x2 z1^2 */
+ if (z2_is_one)
+ mpi_set(l1, x1);
+ else {
+ ec_pow2(l1, z2, ctx);
+ ec_mulm(l1, l1, x1, ctx);
+ }
+ if (z1_is_one)
+ mpi_set(l2, x2);
+ else {
+ ec_pow2(l2, z1, ctx);
+ ec_mulm(l2, l2, x2, ctx);
+ }
+ /* l3 = l1 - l2 */
+ ec_subm(l3, l1, l2, ctx);
+ /* l4 = y1 z2^3 */
+ ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l4, l4, y1, ctx);
+ /* l5 = y2 z1^3 */
+ ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l5, l5, y2, ctx);
+ /* l6 = l4 - l5 */
+ ec_subm(l6, l4, l5, ctx);
+
+ if (!mpi_cmp_ui(l3, 0)) {
+ if (!mpi_cmp_ui(l6, 0)) {
+ /* P1 and P2 are the same - use duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else {
+ /* P1 is the inverse of P2. */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ }
+ } else {
+ /* l7 = l1 + l2 */
+ ec_addm(l7, l1, l2, ctx);
+ /* l8 = l4 + l5 */
+ ec_addm(l8, l4, l5, ctx);
+ /* z3 = z1 z2 l3 */
+ ec_mulm(z3, z1, z2, ctx);
+ ec_mulm(z3, z3, l3, ctx);
+ /* x3 = l6^2 - l7 l3^2 */
+ ec_pow2(t1, l6, ctx);
+ ec_pow2(t2, l3, ctx);
+ ec_mulm(t2, t2, l7, ctx);
+ ec_subm(x3, t1, t2, ctx);
+ /* l9 = l7 l3^2 - 2 x3 */
+ ec_mul2(t1, x3, ctx);
+ ec_subm(l9, t2, t1, ctx);
+ /* y3 = (l9 l6 - l8 l3^3)/2 */
+ ec_mulm(l9, l9, l6, ctx);
+ ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/
+ ec_mulm(t1, t1, l8, ctx);
+ ec_subm(y3, l9, t1, ctx);
+ ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx);
+ }
+ }
+
+#undef x1
+#undef y1
+#undef z1
+#undef x2
+#undef y2
+#undef z2
+#undef x3
+#undef y3
+#undef z3
+#undef l1
+#undef l2
+#undef l3
+#undef l4
+#undef l5
+#undef l6
+#undef l7
+#undef l8
+#undef l9
+#undef t1
+#undef t2
+}
+
+/* RESULT = P1 + P2 (Montgomery version).*/
+static void add_points_montgomery(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)p1;
+ (void)p2;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_add_points", "Montgomery");
+}
+
+/* RESULT = P1 + P2 (Twisted Edwards version).*/
+static void add_points_edwards(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define X1 (p1->x)
+#define Y1 (p1->y)
+#define Z1 (p1->z)
+#define X2 (p2->x)
+#define Y2 (p2->y)
+#define Z2 (p2->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define A (ctx->t.scratch[0])
+#define B (ctx->t.scratch[1])
+#define C (ctx->t.scratch[2])
+#define D (ctx->t.scratch[3])
+#define E (ctx->t.scratch[4])
+#define F (ctx->t.scratch[5])
+#define G (ctx->t.scratch[6])
+#define tmp (ctx->t.scratch[7])
+
+ point_resize(result, ctx);
+
+ /* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_2) */
+
+ /* A = Z1 · Z2 */
+ ctx->mulm(A, Z1, Z2, ctx);
+
+ /* B = A^2 */
+ ctx->pow2(B, A, ctx);
+
+ /* C = X1 · X2 */
+ ctx->mulm(C, X1, X2, ctx);
+
+ /* D = Y1 · Y2 */
+ ctx->mulm(D, Y1, Y2, ctx);
+
+ /* E = d · C · D */
+ ctx->mulm(E, ctx->b, C, ctx);
+ ctx->mulm(E, E, D, ctx);
+
+ /* F = B - E */
+ ctx->subm(F, B, E, ctx);
+
+ /* G = B + E */
+ ctx->addm(G, B, E, ctx);
+
+ /* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */
+ ctx->addm(tmp, X1, Y1, ctx);
+ ctx->addm(X3, X2, Y2, ctx);
+ ctx->mulm(X3, X3, tmp, ctx);
+ ctx->subm(X3, X3, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, F, ctx);
+ ctx->mulm(X3, X3, A, ctx);
+
+ /* Y_3 = A · G · (D - aC) */
+ if (ctx->dialect == ECC_DIALECT_ED25519) {
+ ctx->addm(Y3, D, C, ctx);
+ } else {
+ ctx->mulm(Y3, ctx->a, C, ctx);
+ ctx->subm(Y3, D, Y3, ctx);
+ }
+ ctx->mulm(Y3, Y3, G, ctx);
+ ctx->mulm(Y3, Y3, A, ctx);
+
+ /* Z_3 = F · G */
+ ctx->mulm(Z3, F, G, ctx);
+
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X2
+#undef Y2
+#undef Z2
+#undef X3
+#undef Y3
+#undef Z3
+#undef A
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef G
+#undef tmp
+}
+
+/* Compute a step of Montgomery Ladder (only use X and Z in the point).
+ * Inputs: P1, P2, and x-coordinate of DIF = P1 - P2.
+ * Outputs: PRD = 2 * P1 and SUM = P1 + P2.
+ */
+static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum,
+ MPI_POINT p1, MPI_POINT p2, MPI dif_x,
+ struct mpi_ec_ctx *ctx)
+{
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->addm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->mulm(p2->x, p1->z, sum->x, ctx);
+ ctx->mulm(p2->z, prd->x, p2->z, ctx);
+ ctx->pow2(p1->x, prd->x, ctx);
+ ctx->pow2(p1->z, p1->z, ctx);
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->mulm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->pow2(sum->x, sum->x, ctx);
+ ctx->pow2(sum->z, p2->z, ctx);
+ ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */
+ ctx->mulm(sum->z, sum->z, dif_x, ctx);
+ ctx->addm(prd->z, p1->x, prd->z, ctx);
+ ctx->mulm(prd->z, prd->z, p1->z, ctx);
+}
+
+/* RESULT = P1 + P2 */
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ add_points_weierstrass(result, p1, p2, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ add_points_montgomery(result, p1, p2, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ add_points_edwards(result, p1, p2, ctx);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_add_points);
+
+/* Scalar point multiplication - the main function for ECC. It takes
+ * an integer SCALAR and a POINT as well as the usual context CTX.
+ * RESULT will be set to the resulting point.
+ */
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx)
+{
+ MPI x1, y1, z1, k, h, yy;
+ unsigned int i, loops;
+ struct gcry_mpi_point p1, p2, p1inv;
+
+ if (ctx->model == MPI_EC_EDWARDS) {
+ /* Simple left to right binary method. Algorithm 3.27 from
+ * {author={Hankerson, Darrel and Menezes, Alfred J. and Vanstone, Scott},
+ * title = {Guide to Elliptic Curve Cryptography},
+ * year = {2003}, isbn = {038795273X},
+ * url = {http://www.cacr.math.uwaterloo.ca/ecc/},
+ * publisher = {Springer-Verlag New York, Inc.}}
+ */
+ unsigned int nbits;
+ int j;
+
+ if (mpi_cmp(scalar, ctx->p) >= 0)
+ nbits = mpi_get_nbits(scalar);
+ else
+ nbits = mpi_get_nbits(ctx->p);
+
+ mpi_set_ui(result->x, 0);
+ mpi_set_ui(result->y, 1);
+ mpi_set_ui(result->z, 1);
+ point_resize(point, ctx);
+
+ point_resize(result, ctx);
+ point_resize(point, ctx);
+
+ for (j = nbits-1; j >= 0; j--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(scalar, j))
+ mpi_ec_add_points(result, result, point, ctx);
+ }
+ return;
+ } else if (ctx->model == MPI_EC_MONTGOMERY) {
+ unsigned int nbits;
+ int j;
+ struct gcry_mpi_point p1_, p2_;
+ MPI_POINT q1, q2, prd, sum;
+ unsigned long sw;
+ mpi_size_t rsize;
+
+ /* Compute scalar point multiplication with Montgomery Ladder.
+ * Note that we don't use the Y-coordinate of the points at all.
+ * RESULT->Y will be set to zero.
+ */
+
+ nbits = mpi_get_nbits(scalar);
+ point_init(&p1);
+ point_init(&p2);
+ point_init(&p1_);
+ point_init(&p2_);
+ mpi_set_ui(p1.x, 1);
+ mpi_free(p2.x);
+ p2.x = mpi_copy(point->x);
+ mpi_set_ui(p2.z, 1);
+
+ point_resize(&p1, ctx);
+ point_resize(&p2, ctx);
+ point_resize(&p1_, ctx);
+ point_resize(&p2_, ctx);
+
+ mpi_resize(point->x, ctx->p->nlimbs);
+ point->x->nlimbs = ctx->p->nlimbs;
+
+ q1 = &p1;
+ q2 = &p2;
+ prd = &p1_;
+ sum = &p2_;
+
+ for (j = nbits-1; j >= 0; j--) {
+ MPI_POINT t;
+
+ sw = mpi_test_bit(scalar, j);
+ point_swap_cond(q1, q2, sw, ctx);
+ montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
+ point_swap_cond(prd, sum, sw, ctx);
+ t = q1; q1 = prd; prd = t;
+ t = q2; q2 = sum; sum = t;
+ }
+
+ mpi_clear(result->y);
+ sw = (nbits & 1);
+ point_swap_cond(&p1, &p1_, sw, ctx);
+
+ rsize = p1.z->nlimbs;
+ MPN_NORMALIZE(p1.z->d, rsize);
+ if (rsize == 0) {
+ mpi_set_ui(result->x, 1);
+ mpi_set_ui(result->z, 0);
+ } else {
+ z1 = mpi_new(0);
+ ec_invm(z1, p1.z, ctx);
+ ec_mulm(result->x, p1.x, z1, ctx);
+ mpi_set_ui(result->z, 1);
+ mpi_free(z1);
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1_);
+ point_free(&p2_);
+ return;
+ }
+
+ x1 = mpi_alloc_like(ctx->p);
+ y1 = mpi_alloc_like(ctx->p);
+ h = mpi_alloc_like(ctx->p);
+ k = mpi_copy(scalar);
+ yy = mpi_copy(point->y);
+
+ if (mpi_has_sign(k)) {
+ k->sign = 0;
+ ec_invm(yy, yy, ctx);
+ }
+
+ if (!mpi_cmp_ui(point->z, 1)) {
+ mpi_set(x1, point->x);
+ mpi_set(y1, yy);
+ } else {
+ MPI z2, z3;
+
+ z2 = mpi_alloc_like(ctx->p);
+ z3 = mpi_alloc_like(ctx->p);
+ ec_mulm(z2, point->z, point->z, ctx);
+ ec_mulm(z3, point->z, z2, ctx);
+ ec_invm(z2, z2, ctx);
+ ec_mulm(x1, point->x, z2, ctx);
+ ec_invm(z3, z3, ctx);
+ ec_mulm(y1, yy, z3, ctx);
+ mpi_free(z2);
+ mpi_free(z3);
+ }
+ z1 = mpi_copy(mpi_const(MPI_C_ONE));
+
+ mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */
+ loops = mpi_get_nbits(h);
+ if (loops < 2) {
+ /* If SCALAR is zero, the above mpi_mul sets H to zero and thus
+ * LOOPS will be zero. To avoid an underflow of I in the main
+ * loop we set LOOPS to 2 and the result to (0,0,0).
+ */
+ loops = 2;
+ mpi_clear(result->x);
+ mpi_clear(result->y);
+ mpi_clear(result->z);
+ } else {
+ mpi_set(result->x, point->x);
+ mpi_set(result->y, yy);
+ mpi_set(result->z, point->z);
+ }
+ mpi_free(yy); yy = NULL;
+
+ p1.x = x1; x1 = NULL;
+ p1.y = y1; y1 = NULL;
+ p1.z = z1; z1 = NULL;
+ point_init(&p2);
+ point_init(&p1inv);
+
+ /* Invert point: y = p - y mod p */
+ point_set(&p1inv, &p1);
+ ec_subm(p1inv.y, ctx->p, p1inv.y, ctx);
+
+ for (i = loops-2; i > 0; i--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1, ctx);
+ }
+ if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1inv, ctx);
+ }
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1inv);
+ mpi_free(h);
+ mpi_free(k);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_mul_point);
+
+/* Return true if POINT is on the curve described by CTX. */
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ int res = 0;
+ MPI x, y, w;
+
+ x = mpi_new(0);
+ y = mpi_new(0);
+ w = mpi_new(0);
+
+ /* Check that the point is in range. This needs to be done here and
+ * not after conversion to affine coordinates.
+ */
+ if (mpi_cmpabs(point->x, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->y, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->z, ctx->p) >= 0)
+ goto leave;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ {
+ MPI xxx;
+
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ xxx = mpi_new(0);
+
+ /* y^2 == x^3 + a·x + b */
+ ec_pow2(y, y, ctx);
+
+ ec_pow3(xxx, x, ctx);
+ ec_mulm(w, ctx->a, x, ctx);
+ ec_addm(w, w, ctx->b, ctx);
+ ec_addm(w, w, xxx, ctx);
+
+ if (!mpi_cmp(y, w))
+ res = 1;
+
+ mpi_free(xxx);
+ }
+ break;
+
+ case MPI_EC_MONTGOMERY:
+ {
+#define xx y
+ /* With Montgomery curve, only X-coordinate is valid. */
+ if (mpi_ec_get_affine(x, NULL, point, ctx))
+ goto leave;
+
+ /* The equation is: b * y^2 == x^3 + a · x^2 + x */
+ /* We check whether the right-hand side is a quadratic residue
+ * by Euler's criterion.
+ */
+ /* CTX->A has (a-2)/4 and CTX->B has b^-1 */
+ ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx);
+ ec_addm(w, w, mpi_const(MPI_C_TWO), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_pow2(xx, x, ctx);
+ ec_addm(w, w, xx, ctx);
+ ec_addm(w, w, mpi_const(MPI_C_ONE), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_mulm(w, w, ctx->b, ctx);
+#undef xx
+ /* Compute Euler's criterion: w^((p-1)/2) */
+#define p_minus1 y
+ ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx);
+ mpi_rshift(p_minus1, p_minus1, 1);
+ ec_powm(w, w, p_minus1, ctx);
+
+ res = !mpi_cmp_ui(w, 1);
+#undef p_minus1
+ }
+ break;
+
+ case MPI_EC_EDWARDS:
+ {
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ mpi_resize(w, ctx->p->nlimbs);
+ w->nlimbs = ctx->p->nlimbs;
+
+ /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
+ ctx->pow2(x, x, ctx);
+ ctx->pow2(y, y, ctx);
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(w, ctx->p, x, ctx);
+ else
+ ctx->mulm(w, ctx->a, x, ctx);
+ ctx->addm(w, w, y, ctx);
+ ctx->mulm(x, x, y, ctx);
+ ctx->mulm(x, x, ctx->b, ctx);
+ ctx->subm(w, w, x, ctx);
+ if (!mpi_cmp_ui(w, 1))
+ res = 1;
+ }
+ break;
+ }
+
+leave:
+ mpi_free(w);
+ mpi_free(x);
+ mpi_free(y);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(mpi_ec_curve_point);
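For orientation, a minimal caller-side sketch of how these primitives fit together - validate a peer point, multiply it by a scalar, and convert the result back to affine coordinates - might look as follows. This is an illustration only, not part of the patch; the context setup (mpi_ec_init() with concrete curve parameters) is assumed to have happened elsewhere, and mpi_point_new()/mpi_point_release() are the point allocation helpers assumed to be available to callers.

#include <linux/errno.h>
#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static int example_ec_scalar_mul(struct mpi_ec_ctx *ctx, MPI scalar,
				 MPI_POINT peer, MPI out_x, MPI out_y)
{
	MPI_POINT res;
	int ret = -EINVAL;

	/* Reject points that do not lie on the configured curve. */
	if (!mpi_ec_curve_point(peer, ctx))
		return ret;

	res = mpi_point_new(0);
	if (!res)
		return -ENOMEM;

	/* res = scalar * peer (projective coordinates). */
	mpi_ec_mul_point(res, scalar, peer, ctx);

	/* mpi_ec_get_affine() returns 0 on success and non-zero for the
	 * point at infinity.
	 */
	if (!mpi_ec_get_affine(out_x, out_y, res, ctx))
		ret = 0;

	mpi_point_release(res);
	return ret;
}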
diff --git a/lib/mpi/generic_mpih-add1.c b/lib/crypto/mpi/generic_mpih-add1.c
index 299308b5461c..299308b5461c 100644
--- a/lib/mpi/generic_mpih-add1.c
+++ b/lib/crypto/mpi/generic_mpih-add1.c
diff --git a/lib/mpi/generic_mpih-lshift.c b/lib/crypto/mpi/generic_mpih-lshift.c
index 7b21f5938a50..7b21f5938a50 100644
--- a/lib/mpi/generic_mpih-lshift.c
+++ b/lib/crypto/mpi/generic_mpih-lshift.c
diff --git a/lib/mpi/generic_mpih-mul1.c b/lib/crypto/mpi/generic_mpih-mul1.c
index e020e61d47b9..e020e61d47b9 100644
--- a/lib/mpi/generic_mpih-mul1.c
+++ b/lib/crypto/mpi/generic_mpih-mul1.c
diff --git a/lib/mpi/generic_mpih-mul2.c b/lib/crypto/mpi/generic_mpih-mul2.c
index 9484d8528243..9484d8528243 100644
--- a/lib/mpi/generic_mpih-mul2.c
+++ b/lib/crypto/mpi/generic_mpih-mul2.c
diff --git a/lib/mpi/generic_mpih-mul3.c b/lib/crypto/mpi/generic_mpih-mul3.c
index ccdbab4121e0..ccdbab4121e0 100644
--- a/lib/mpi/generic_mpih-mul3.c
+++ b/lib/crypto/mpi/generic_mpih-mul3.c
diff --git a/lib/mpi/generic_mpih-rshift.c b/lib/crypto/mpi/generic_mpih-rshift.c
index e07bc69aa898..e07bc69aa898 100644
--- a/lib/mpi/generic_mpih-rshift.c
+++ b/lib/crypto/mpi/generic_mpih-rshift.c
diff --git a/lib/mpi/generic_mpih-sub1.c b/lib/crypto/mpi/generic_mpih-sub1.c
index eea4382aad5f..eea4382aad5f 100644
--- a/lib/mpi/generic_mpih-sub1.c
+++ b/lib/crypto/mpi/generic_mpih-sub1.c
diff --git a/lib/mpi/longlong.h b/lib/crypto/mpi/longlong.h
index 08c60d10747f..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/crypto/mpi/longlong.h
@@ -48,8 +48,8 @@
/* Define auxiliary asm macros.
*
- * 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
- * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*
* 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
@@ -397,8 +397,8 @@ do { \
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addl %5,%1\n" \
"adcl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"%1" ((USItype)(al)), \
@@ -406,22 +406,22 @@ do { \
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("subl %5,%1\n" \
"sbbl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mull %3" \
- : "=a" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
+ : "=a" (w0), \
+ "=d" (w1) \
: "%0" ((USItype)(u)), \
"rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
__asm__ ("divl %4" \
- : "=a" ((USItype)(q)), \
- "=d" ((USItype)(r)) \
+ : "=a" (q), \
+ "=d" (r) \
: "0" ((USItype)(n0)), \
"1" ((USItype)(n1)), \
"rm" ((USItype)(d)))
@@ -639,30 +639,12 @@ do { \
************** MIPS *****************
***************************************/
#if defined(__mips__) && W_TYPE_SIZE == 32
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
#define umul_ppmm(w1, w0, u, v) \
do { \
UDItype __ll = (UDItype)(u) * (v); \
w1 = __ll >> 32; \
w0 = __ll; \
} while (0)
-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("multu %2,%3" \
- : "=l" ((USItype)(w0)), \
- "=h" ((USItype)(w1)) \
- : "d" ((USItype)(u)), \
- "d" ((USItype)(v)))
-#else
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("multu %2,%3\n" \
- "mflo %0\n" \
- "mfhi %1" \
- : "=d" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
- : "d" ((USItype)(u)), \
- "d" ((USItype)(v)))
-#endif
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */
@@ -671,7 +653,7 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
/*
* GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
* code below, so we special case MIPS64r6 until the compiler can do better.
@@ -687,7 +669,7 @@ do { \
: "d" ((UDItype)(u)), \
"d" ((UDItype)(v))); \
} while (0)
-#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#else
#define umul_ppmm(w1, w0, u, v) \
do { \
typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
@@ -695,22 +677,6 @@ do { \
w1 = __ll >> 64; \
w0 = __ll; \
} while (0)
-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmultu %2,%3" \
- : "=l" ((UDItype)(w0)), \
- "=h" ((UDItype)(w1)) \
- : "d" ((UDItype)(u)), \
- "d" ((UDItype)(v)))
-#else
-#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmultu %2,%3\n" \
- "mflo %0\n" \
- "mfhi %1" \
- : "=d" ((UDItype)(w0)), \
- "=d" ((UDItype)(w1)) \
- : "d" ((UDItype)(u)), \
- "d" ((UDItype)(v)))
#endif
#define UMUL_TIME 20
#define UDIV_TIME 140
@@ -756,22 +722,22 @@ do { \
do { \
if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else \
__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"%r" ((USItype)(al)), \
@@ -781,36 +747,36 @@ do { \
do { \
if (__builtin_constant_p(ah) && (ah) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else \
__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
@@ -821,7 +787,7 @@ do { \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype) ph) \
+ : "=r" (ph) \
: "%r" (__m0), \
"r" (__m1)); \
(pl) = __m0 * __m1; \
diff --git a/lib/crypto/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c
new file mode 100644
index 000000000000..9056fc5167fc
--- /dev/null
+++ b/lib/crypto/mpi/mpi-add.c
@@ -0,0 +1,155 @@
+/* mpi-add.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Add the unsigned integer V to the mpi-integer U and store the
+ * result in W. U and V may be the same.
+ */
+void mpi_add_ui(MPI w, MPI u, unsigned long v)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize, wsize;
+ int usign, wsign;
+
+ usize = u->nlimbs;
+ usign = u->sign;
+ wsign = 0;
+
+ /* If not space for W (and possible carry), increase space. */
+ wsize = usize + 1;
+ if (w->alloced < wsize)
+ mpi_resize(w, wsize);
+
+ /* These must be after realloc (U may be the same as W). */
+ up = u->d;
+ wp = w->d;
+
+ if (!usize) { /* simple */
+ wp[0] = v;
+ wsize = v ? 1:0;
+ } else if (!usign) { /* mpi is not negative */
+ mpi_limb_t cy;
+ cy = mpihelp_add_1(wp, up, usize, v);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (usize == 1 && up[0] < v) {
+ wp[0] = v - up[0];
+ wsize = 1;
+ } else {
+ mpihelp_sub_1(wp, up, usize, v);
+ /* Size can decrease with at most one limb. */
+ wsize = usize - (wp[usize-1] == 0);
+ wsign = 1;
+ }
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+
+
+void mpi_add(MPI w, MPI u, MPI v)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t usize, vsize, wsize;
+ int usign, vsign, wsign;
+
+ if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = v->d;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = u->d;
+ vp = v->d;
+ }
+ wp = w->d;
+ wsign = 0;
+
+ if (!vsize) { /* simple */
+ MPN_COPY(wp, up, usize);
+ wsize = usize;
+ wsign = usign;
+ } else if (usign != vsign) { /* different sign */
+ /* This test is right since USIZE >= VSIZE */
+ if (usize != vsize) {
+ mpihelp_sub(wp, up, usize, vp, vsize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ wsign = usign;
+ } else if (mpihelp_cmp(up, vp, usize) < 0) {
+ mpihelp_sub_n(wp, vp, up, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (!usign)
+ wsign = 1;
+ } else {
+ mpihelp_sub_n(wp, up, vp, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (usign)
+ wsign = 1;
+ }
+ } else { /* U and V have same sign. Add them. */
+ mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ if (usign)
+ wsign = 1;
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+EXPORT_SYMBOL_GPL(mpi_add);
+
+void mpi_sub(MPI w, MPI u, MPI v)
+{
+ MPI vv = mpi_copy(v);
+ vv->sign = !vv->sign;
+ mpi_add(w, u, vv);
+ mpi_free(vv);
+}
+EXPORT_SYMBOL_GPL(mpi_sub);
+
+void mpi_addm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_add(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_addm);
+
+void mpi_subm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_sub(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_subm);
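As a usage illustration (not part of this patch), computing (u + v) mod m and (u - v) mod m with the helpers above could look like this; the operand names are hypothetical and the result MPIs are freshly allocated with mpi_new().

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static void example_mod_add_sub(MPI u, MPI v, MPI m)
{
	MPI sum = mpi_new(0);
	MPI diff = mpi_new(0);

	mpi_addm(sum, u, v, m);		/* sum  = (u + v) mod m */
	mpi_subm(diff, u, v, m);	/* diff = (u - v) mod m; mpi_sub()
					 * just negates V and re-uses mpi_add().
					 */

	mpi_free(sum);
	mpi_free(diff);
}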
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
new file mode 100644
index 000000000000..070ba784c9f1
--- /dev/null
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -0,0 +1,308 @@
+/* mpi-bit.c - MPI bit level functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define A_LIMB_1 ((mpi_limb_t) 1)
+
+/****************
+ * Sometimes we have MSLs (most significant limbs) which are 0;
+ * for several reasons this is undesirable, so this function removes them.
+ */
+void mpi_normalize(MPI a)
+{
+ for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
+ ;
+}
+EXPORT_SYMBOL_GPL(mpi_normalize);
+
+/****************
+ * Return the number of bits in A.
+ */
+unsigned mpi_get_nbits(MPI a)
+{
+ unsigned n;
+
+ mpi_normalize(a);
+
+ if (a->nlimbs) {
+ mpi_limb_t alimb = a->d[a->nlimbs - 1];
+ if (alimb)
+ n = count_leading_zeros(alimb);
+ else
+ n = BITS_PER_MPI_LIMB;
+ n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
+ } else
+ n = 0;
+ return n;
+}
+EXPORT_SYMBOL_GPL(mpi_get_nbits);
+
+/****************
+ * Test whether bit N is set.
+ */
+int mpi_test_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+ mpi_limb_t limb;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return 0; /* too far left: this is a 0 */
+ limb = a->d[limbno];
+ return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(mpi_test_bit);
+
+/****************
+ * Set bit N of A.
+ */
+void mpi_set_bit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+}
+
+/****************
+ * Set bit N of A and clear all bits above it.
+ */
+void mpi_set_highbit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+ for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+EXPORT_SYMBOL_GPL(mpi_set_highbit);
+
+/****************
+ * clear bit N of A and all bits above
+ */
+void mpi_clear_highbit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* not allocated, therefore no need to clear bits :-) */
+
+ for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+
+/****************
+ * Clear bit N of A.
+ */
+void mpi_clear_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* Don't need to clear this bit, it's far too left. */
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+}
+EXPORT_SYMBOL_GPL(mpi_clear_bit);
+
+
+/****************
+ * Shift A by COUNT limbs to the right
+ * This is used only within the MPI library
+ */
+void mpi_rshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap = a->d;
+ mpi_size_t n = a->nlimbs;
+ unsigned int i;
+
+ if (count >= n) {
+ a->nlimbs = 0;
+ return;
+ }
+
+ for (i = 0; i < n - count; i++)
+ ap[i] = ap[i+count];
+ ap[i] = 0;
+ a->nlimbs -= count;
+}
+
+/*
+ * Shift A by N bits to the right.
+ */
+void mpi_rshift(MPI x, MPI a, unsigned int n)
+{
+ mpi_size_t xsize;
+ unsigned int i;
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a) {
+ /* In-place operation. */
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else if (nlimbs) {
+ /* Copy and shift by at least as many bits as there are in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+ for (i = 0; i < a->nlimbs; i++)
+ x->d[i] = a->d[i];
+ x->nlimbs = i;
+
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else {
+ /* Copy and shift by fewer bits than there are in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+
+ if (xsize) {
+ if (nbits)
+ mpihelp_rshift(x->d, a->d, x->nlimbs, nbits);
+ else {
+ /* The rshift helper function is not specified for
+ * NBITS==0, thus we do a plain copy here.
+ */
+ for (i = 0; i < x->nlimbs; i++)
+ x->d[i] = a->d[i];
+ }
+ }
+ }
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
+EXPORT_SYMBOL_GPL(mpi_rshift);
+
+/****************
+ * Shift A by COUNT limbs to the left
+ * This is used only within the MPI library
+ */
+void mpi_lshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap;
+ int n = a->nlimbs;
+ int i;
+
+ if (!count || !n)
+ return;
+
+ RESIZE_IF_NEEDED(a, n+count);
+
+ ap = a->d;
+ for (i = n-1; i >= 0; i--)
+ ap[i+count] = ap[i];
+ for (i = 0; i < count; i++)
+ ap[i] = 0;
+ a->nlimbs += count;
+}
+
+/*
+ * Shift A by N bits to the left.
+ */
+void mpi_lshift(MPI x, MPI a, unsigned int n)
+{
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a && !n)
+ return; /* In-place shift with an amount of zero. */
+
+ if (x != a) {
+ /* Copy A to X. */
+ unsigned int alimbs = a->nlimbs;
+ int asign = a->sign;
+ mpi_ptr_t xp, ap;
+
+ RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
+ xp = x->d;
+ ap = a->d;
+ MPN_COPY(xp, ap, alimbs);
+ x->nlimbs = alimbs;
+ x->flags = a->flags;
+ x->sign = asign;
+ }
+
+ if (nlimbs && !nbits) {
+ /* Shift a full number of limbs. */
+ mpi_lshift_limbs(x, nlimbs);
+ } else if (n) {
+ /* We use a very dumb approach: shift left by the number of
+ * limbs plus one and then fix it up with an rshift.
+ */
+ mpi_lshift_limbs(x, nlimbs+1);
+ mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
+ }
+
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
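The bit-level helpers above are what the EC code uses to walk a scalar most-significant-bit first. A small sketch of that pattern (illustrative only, not part of this patch):

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static unsigned int example_count_set_bits(MPI k)
{
	unsigned int nbits = mpi_get_nbits(k);	/* index of the highest set bit + 1 */
	unsigned int count = 0;
	int i;

	for (i = nbits - 1; i >= 0; i--) {
		/* mpi_test_bit() maps bit I to limb I / BITS_PER_MPI_LIMB,
		 * bit I % BITS_PER_MPI_LIMB, and treats out-of-range bits as 0.
		 */
		if (mpi_test_bit(k, i))
			count++;
	}
	return count;
}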
diff --git a/lib/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c
index d25e9e96c310..0835b6213235 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/crypto/mpi/mpi-cmp.c
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
mpi_limb_t limb = v;
mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
if (u->sign)
return -1;
if (u->nlimbs > 1)
@@ -41,28 +45,54 @@ int mpi_cmp_ui(MPI u, unsigned long v)
}
EXPORT_SYMBOL_GPL(mpi_cmp_ui);
-int mpi_cmp(MPI u, MPI v)
+static int do_mpi_cmp(MPI u, MPI v, int absmode)
{
- mpi_size_t usize, vsize;
+ mpi_size_t usize;
+ mpi_size_t vsize;
+ int usign;
+ int vsign;
int cmp;
mpi_normalize(u);
mpi_normalize(v);
+
usize = u->nlimbs;
vsize = v->nlimbs;
- if (!u->sign && v->sign)
+ usign = absmode ? 0 : u->sign;
+ vsign = absmode ? 0 : v->sign;
+
+ /* Compare sign bits. */
+
+ if (!usign && vsign)
return 1;
- if (u->sign && !v->sign)
+ if (usign && !vsign)
return -1;
- if (usize != vsize && !u->sign && !v->sign)
+
+ /* U and V are either both positive or both negative. */
+
+ if (usize != vsize && !usign && !vsign)
return usize - vsize;
- if (usize != vsize && u->sign && v->sign)
- return vsize - usize;
+ if (usize != vsize && usign && vsign)
+ return vsize + usize;
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
- if (u->sign)
- return -cmp;
- return cmp;
+ if (!cmp)
+ return 0;
+ if ((cmp < 0?1:0) == (usign?1:0))
+ return 1;
+
+ return -1;
+}
+
+int mpi_cmp(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 0);
}
EXPORT_SYMBOL_GPL(mpi_cmp);
+
+int mpi_cmpabs(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 1);
+}
+EXPORT_SYMBOL_GPL(mpi_cmpabs);
diff --git a/lib/crypto/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c
new file mode 100644
index 000000000000..45beab8b9e9e
--- /dev/null
+++ b/lib/crypto/mpi/mpi-div.c
@@ -0,0 +1,234 @@
+/* mpi-div.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
+
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ /* We need the original value of the divisor after the remainder has been
+ * preliminarily calculated. We have to copy it to temporary space if it's
+ * the same variable as REM.
+ */
+ if (rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_r(rem, dividend, divisor);
+
+ if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
+ mpi_add(rem, rem, divisor);
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
+
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
+{
+ MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
+ mpi_fdiv_qr(quot, tmp, dividend, divisor);
+ mpi_free(tmp);
+}
+
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ if (quot == divisor || rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_qr(quot, rem, dividend, divisor);
+
+ if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
+ mpi_sub_ui(quot, quot, 1);
+ mpi_add(rem, rem, divisor);
+ }
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
+
+/* If den == quot, den needs temporary storage.
+ * If den == rem, den needs temporary storage.
+ * If num == quot, num needs temporary storage.
+ * If den has temporary storage, it can be normalized while being copied,
+ * i.e no extra storage should be allocated.
+ */
+
+void mpi_tdiv_r(MPI rem, MPI num, MPI den)
+{
+ mpi_tdiv_qr(NULL, rem, num, den);
+}
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
+{
+ mpi_ptr_t np, dp;
+ mpi_ptr_t qp, rp;
+ mpi_size_t nsize = num->nlimbs;
+ mpi_size_t dsize = den->nlimbs;
+ mpi_size_t qsize, rsize;
+ mpi_size_t sign_remainder = num->sign;
+ mpi_size_t sign_quotient = num->sign ^ den->sign;
+ unsigned int normalization_steps;
+ mpi_limb_t q_limb;
+ mpi_ptr_t marker[5];
+ int markidx = 0;
+
+ /* Ensure space is enough for quotient and remainder.
+ * We need space for an extra limb in the remainder, because it's
+ * up-shifted (normalized) below.
+ */
+ rsize = nsize + 1;
+ mpi_resize(rem, rsize);
+
+ qsize = rsize - dsize; /* qsize cannot be bigger than this. */
+ if (qsize <= 0) {
+ if (num != rem) {
+ rem->nlimbs = num->nlimbs;
+ rem->sign = num->sign;
+ MPN_COPY(rem->d, num->d, nsize);
+ }
+ if (quot) {
+ /* This needs to follow the assignment to rem, in case the
+ * numerator and quotient are the same.
+ */
+ quot->nlimbs = 0;
+ quot->sign = 0;
+ }
+ return;
+ }
+
+ if (quot)
+ mpi_resize(quot, qsize);
+
+ /* Read pointers here, when reallocation is finished. */
+ np = num->d;
+ dp = den->d;
+ rp = rem->d;
+
+ /* Optimize division by a single-limb divisor. */
+ if (dsize == 1) {
+ mpi_limb_t rlimb;
+ if (quot) {
+ qp = quot->d;
+ rlimb = mpihelp_divmod_1(qp, np, nsize, dp[0]);
+ qsize -= qp[qsize - 1] == 0;
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ } else
+ rlimb = mpihelp_mod_1(np, nsize, dp[0]);
+ rp[0] = rlimb;
+ rsize = rlimb != 0?1:0;
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ return;
+ }
+
+
+ if (quot) {
+ qp = quot->d;
+ /* Make sure QP and NP point to different objects. Otherwise the
+ * numerator would be gradually overwritten by the quotient limbs.
+ */
+ if (qp == np) { /* Copy NP object to temporary space. */
+ np = marker[markidx++] = mpi_alloc_limb_space(nsize);
+ MPN_COPY(np, qp, nsize);
+ }
+ } else /* Put quotient at top of remainder. */
+ qp = rp + dsize;
+
+ normalization_steps = count_leading_zeros(dp[dsize - 1]);
+
+ /* Normalize the denominator, i.e. make its most significant bit set by
+ * shifting it NORMALIZATION_STEPS bits to the left. Also shift the
+ * numerator the same number of steps (to keep the quotient the same!).
+ */
+ if (normalization_steps) {
+ mpi_ptr_t tp;
+ mpi_limb_t nlimb;
+
+ /* Shift up the denominator setting the most significant bit of
+ * the most significant word. Use temporary storage not to clobber
+ * the original contents of the denominator.
+ */
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ mpihelp_lshift(tp, dp, dsize, normalization_steps);
+ dp = tp;
+
+ /* Shift up the numerator, possibly introducing a new most
+ * significant word. Move the shifted numerator in the remainder
+ * meanwhile.
+ */
+ nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
+ if (nlimb) {
+ rp[nsize] = nlimb;
+ rsize = nsize + 1;
+ } else
+ rsize = nsize;
+ } else {
+ /* The denominator is already normalized, as required. Copy it to
+ * temporary space if it overlaps with the quotient or remainder.
+ */
+ if (dp == rp || (quot && (dp == qp))) {
+ mpi_ptr_t tp;
+
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ MPN_COPY(tp, dp, dsize);
+ dp = tp;
+ }
+
+ /* Move the numerator to the remainder. */
+ if (rp != np)
+ MPN_COPY(rp, np, nsize);
+
+ rsize = nsize;
+ }
+
+ q_limb = mpihelp_divrem(qp, 0, rp, rsize, dp, dsize);
+
+ if (quot) {
+ qsize = rsize - dsize;
+ if (q_limb) {
+ qp[qsize] = q_limb;
+ qsize += 1;
+ }
+
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ }
+
+ rsize = dsize;
+ MPN_NORMALIZE(rp, rsize);
+
+ if (normalization_steps && rsize) {
+ mpihelp_rshift(rp, rp, rsize, normalization_steps);
+ rsize -= rp[rsize - 1] == 0?1:0;
+ }
+
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ while (markidx) {
+ markidx--;
+ mpi_free_limb_space(marker[markidx]);
+ }
+}
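The difference between the truncating and the floor-style remainder shows up with a negative dividend: for dividend -7 and divisor 3, mpi_tdiv_r() yields -1 while mpi_fdiv_r() yields 2 (the remainder then carries the divisor's sign). An illustrative sketch, not part of this patch:

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static void example_remainders(MPI dividend, MPI divisor)
{
	MPI trem = mpi_new(0);
	MPI frem = mpi_new(0);

	mpi_tdiv_r(trem, dividend, divisor);	/* truncate towards zero */
	mpi_fdiv_r(frem, dividend, divisor);	/* floor: non-zero remainder
						 * takes the divisor's sign
						 */

	mpi_free(trem);
	mpi_free(frem);
}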
diff --git a/lib/mpi/mpi-inline.h b/lib/crypto/mpi/mpi-inline.h
index 980b6b940953..980b6b940953 100644
--- a/lib/mpi/mpi-inline.h
+++ b/lib/crypto/mpi/mpi-inline.h
diff --git a/lib/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h
index 91df5f0b70f2..554002182db1 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/crypto/mpi/mpi-internal.h
@@ -52,6 +52,12 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
+#define RESIZE_IF_NEEDED(a, b) \
+ do { \
+ if ((a)->alloced < (b)) \
+ mpi_resize((a), (b)); \
+ } while (0)
+
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
do { \
@@ -60,6 +66,14 @@ typedef int mpi_size_t; /* (must be a signed type) */
(d)[_i] = (s)[_i]; \
} while (0)
+#define MPN_COPY_INCR(d, s, n) \
+ do { \
+ mpi_size_t _i; \
+ for (_i = 0; _i < (n); _i++) \
+ (d)[_i] = (s)[_i]; \
+ } while (0)
+
+
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -92,6 +106,38 @@ typedef int mpi_size_t; /* (must be a signed type) */
mul_n(prodp, up, vp, size, tspace); \
} while (0);
+/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
+ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
+ * If this would yield overflow, DI should be the largest possible number
+ * (i.e., only ones). For correct operation, the most significant bit of D
+ * has to be set. Put the quotient in Q and the remainder in R.
+ */
+#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
+ do { \
+ mpi_limb_t _ql __maybe_unused; \
+ mpi_limb_t _q, _r; \
+ mpi_limb_t _xh, _xl; \
+ umul_ppmm(_q, _ql, (nh), (di)); \
+ _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
+ umul_ppmm(_xh, _xl, _q, (d)); \
+ sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ } \
+ } \
+ if (_r >= (d)) { \
+ _r -= (d); \
+ _q++; \
+ } \
+ (r) = _r; \
+ (q) = _q; \
+ } while (0)
+
+
/*-- mpiutil.c --*/
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
void mpi_free_limb_space(mpi_ptr_t a);
@@ -135,6 +181,8 @@ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
mpi_ptr_t tspace);
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
@@ -146,9 +194,14 @@ mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
/*-- mpih-div.c --*/
+mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize,
mpi_ptr_t dp, mpi_size_t dsize);
+mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
/*-- generic_mpih-[lr]shift.c --*/
mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/crypto/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c
new file mode 100644
index 000000000000..61e37d18f793
--- /dev/null
+++ b/lib/crypto/mpi/mpi-inv.c
@@ -0,0 +1,143 @@
+/* mpi-inv.c - MPI functions
+ * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Calculate the multiplicative inverse X of A mod N
+ * That is: Find the solution x for
+ * 1 = (a*x) mod n
+ */
+int mpi_invm(MPI x, MPI a, MPI n)
+{
+ /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
+ * modified according to Michael Penk's solution for Exercise 35
+ * with further enhancement
+ */
+ MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
+ unsigned int k;
+ int sign;
+ int odd;
+
+ if (!mpi_cmp_ui(a, 0))
+ return 0; /* Inverse does not exist. */
+ if (!mpi_cmp_ui(n, 1))
+ return 0; /* Inverse does not exist. */
+
+ u = mpi_copy(a);
+ v = mpi_copy(n);
+
+ for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
+ mpi_rshift(u, u, 1);
+ mpi_rshift(v, v, 1);
+ }
+ odd = mpi_test_bit(v, 0);
+
+ u1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ u2 = mpi_alloc_set_ui(0);
+ u3 = mpi_copy(u);
+ v1 = mpi_copy(v);
+ if (!odd) {
+ v2 = mpi_alloc(mpi_get_nlimbs(u));
+ mpi_sub(v2, u1, u); /* U is used as const 1 */
+ }
+ v3 = mpi_copy(v);
+ if (mpi_test_bit(u, 0)) { /* u is odd */
+ t1 = mpi_alloc_set_ui(0);
+ if (!odd) {
+ t2 = mpi_alloc_set_ui(1);
+ t2->sign = 1;
+ }
+ t3 = mpi_copy(v);
+ t3->sign = !t3->sign;
+ goto Y4;
+ } else {
+ t1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ t2 = mpi_alloc_set_ui(0);
+ t3 = mpi_copy(u);
+ }
+
+ do {
+ do {
+ if (!odd) {
+ if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
+ /* one is odd */
+ mpi_add(t1, t1, v);
+ mpi_sub(t2, t2, u);
+ }
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t2, t2, 1);
+ mpi_rshift(t3, t3, 1);
+ } else {
+ if (mpi_test_bit(t1, 0))
+ mpi_add(t1, t1, v);
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t3, t3, 1);
+ }
+Y4:
+ ;
+ } while (!mpi_test_bit(t3, 0)); /* while t3 is even */
+
+ if (!t3->sign) {
+ mpi_set(u1, t1);
+ if (!odd)
+ mpi_set(u2, t2);
+ mpi_set(u3, t3);
+ } else {
+ mpi_sub(v1, v, t1);
+ sign = u->sign; u->sign = !u->sign;
+ if (!odd)
+ mpi_sub(v2, u, t2);
+ u->sign = sign;
+ sign = t3->sign; t3->sign = !t3->sign;
+ mpi_set(v3, t3);
+ t3->sign = sign;
+ }
+ mpi_sub(t1, u1, v1);
+ if (!odd)
+ mpi_sub(t2, u2, v2);
+ mpi_sub(t3, u3, v3);
+ if (t1->sign) {
+ mpi_add(t1, t1, v);
+ if (!odd)
+ mpi_sub(t2, t2, u);
+ }
+ } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
+ /* mpi_lshift( u3, k ); */
+ mpi_set(x, u1);
+
+ mpi_free(u1);
+ mpi_free(v1);
+ mpi_free(t1);
+ if (!odd) {
+ mpi_free(u2);
+ mpi_free(v2);
+ mpi_free(t2);
+ }
+ mpi_free(u3);
+ mpi_free(v3);
+ mpi_free(t3);
+
+ mpi_free(u);
+ mpi_free(v);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mpi_invm);
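A caller-side sketch (illustrative only, not part of this patch) that computes a modular inverse and checks the defining property (a * x) mod n == 1; mpi_invm() returns 0 when no inverse exists:

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static int example_modular_inverse(MPI a, MPI n)
{
	MPI x = mpi_new(0);
	MPI chk = mpi_new(0);
	int ok;

	ok = mpi_invm(x, a, n);		/* 0: no inverse exists */
	if (ok) {
		mpi_mulm(chk, a, x, n);	/* expected to compare equal to 1 */
		ok = !mpi_cmp_ui(chk, 1);
	}

	mpi_free(chk);
	mpi_free(x);
	return ok;
}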
diff --git a/lib/crypto/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c
new file mode 100644
index 000000000000..54fcc01564d9
--- /dev/null
+++ b/lib/crypto/mpi/mpi-mod.c
@@ -0,0 +1,157 @@
+/* mpi-mod.c - Modular reduction
+ * Copyright (C) 1998, 1999, 2001, 2002, 2003,
+ * 2007 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ */
+
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s {
+ MPI m; /* The modulus - may not be modified. */
+ int m_copied; /* If true, M needs to be released. */
+ int k;
+ MPI y;
+ MPI r1; /* Helper MPI. */
+ MPI r2; /* Helper MPI. */
+ MPI r3; /* Helper MPI allocated on demand. */
+};
+
+
+
+void mpi_mod(MPI rem, MPI dividend, MPI divisor)
+{
+ mpi_fdiv_r(rem, dividend, divisor);
+}
+
+/* This function returns a new context for Barrett based operations on
+ * the modulus M. This context needs to be released using
+ * mpi_barrett_free(). If COPY is true, M is copied into the context and
+ * the caller may change M afterwards. If COPY is false, M may not
+ * be changed until mpi_barrett_free() has been called.
+ */
+mpi_barrett_t mpi_barrett_init(MPI m, int copy)
+{
+ mpi_barrett_t ctx;
+ MPI tmp;
+
+ mpi_normalize(m);
+ ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ if (copy) {
+ ctx->m = mpi_copy(m);
+ ctx->m_copied = 1;
+ } else
+ ctx->m = m;
+
+ ctx->k = mpi_get_nlimbs(m);
+ tmp = mpi_alloc(ctx->k + 1);
+
+ /* Barrett precalculation: y = floor(b^(2k) / m). */
+ mpi_set_ui(tmp, 1);
+ mpi_lshift_limbs(tmp, 2 * ctx->k);
+ mpi_fdiv_q(tmp, tmp, m);
+
+ ctx->y = tmp;
+ ctx->r1 = mpi_alloc(2 * ctx->k + 1);
+ ctx->r2 = mpi_alloc(2 * ctx->k + 1);
+
+ return ctx;
+}
+
+void mpi_barrett_free(mpi_barrett_t ctx)
+{
+ if (ctx) {
+ mpi_free(ctx->y);
+ mpi_free(ctx->r1);
+ mpi_free(ctx->r2);
+ if (ctx->r3)
+ mpi_free(ctx->r3);
+ if (ctx->m_copied)
+ mpi_free(ctx->m);
+ kfree(ctx);
+ }
+}
+
+
+/* R = X mod M
+ *
+ * Using Barrett reduction. Before using this function
+ * mpi_barrett_init() must have been called to do the
+ * precalculations. CTX is the context created by that precalculation
+ * and also conveys M. If the Barrett reduction cannot be done, a
+ * straightforward reduction method is used.
+ *
+ * We assume that these conditions are met:
+ * Input: x =(x_2k-1 ...x_0)_b
+ * m =(m_k-1 ....m_0)_b with m_k-1 != 0
+ * Output: r = x mod m
+ */
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
+{
+ MPI m = ctx->m;
+ int k = ctx->k;
+ MPI y = ctx->y;
+ MPI r1 = ctx->r1;
+ MPI r2 = ctx->r2;
+ int sign;
+
+ mpi_normalize(x);
+ if (mpi_get_nlimbs(x) > 2*k) {
+ mpi_mod(r, x, m);
+ return;
+ }
+
+ sign = x->sign;
+ x->sign = 0;
+
+ /* 1. q1 = floor( x / b^(k-1) )
+ * q2 = q1 * y
+ * q3 = floor( q2 / b^(k+1) )
+ * Actually, we don't need q1 and q3 explicitly; we can work directly on r2.
+ */
+ mpi_set(r2, x);
+ mpi_rshift_limbs(r2, k-1);
+ mpi_mul(r2, r2, y);
+ mpi_rshift_limbs(r2, k+1);
+
+ /* 2. r1 = x mod b^(k+1)
+ * r2 = q3 * m mod b^(k+1)
+ * r = r1 - r2
+ * 3. if r < 0 then r = r + b^(k+1)
+ */
+ mpi_set(r1, x);
+ if (r1->nlimbs > k+1) /* Quick modulo operation. */
+ r1->nlimbs = k+1;
+ mpi_mul(r2, r2, m);
+ if (r2->nlimbs > k+1) /* Quick modulo operation. */
+ r2->nlimbs = k+1;
+ mpi_sub(r, r1, r2);
+
+ if (mpi_has_sign(r)) {
+ if (!ctx->r3) {
+ ctx->r3 = mpi_alloc(k + 2);
+ mpi_set_ui(ctx->r3, 1);
+ mpi_lshift_limbs(ctx->r3, k + 1);
+ }
+ mpi_add(r, r, ctx->r3);
+ }
+
+ /* 4. while r >= m do r = r - m */
+ while (mpi_cmp(r, m) >= 0)
+ mpi_sub(r, r, m);
+
+ x->sign = sign;
+}
+
+
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
+{
+ mpi_mul(w, u, v);
+ mpi_mod_barrett(w, w, ctx);
+}
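When many operations share one modulus, the Barrett context amortises the precomputation of y = floor(b^(2k) / m). An illustrative sketch (not part of this patch) of the intended call pattern:

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static void example_barrett(MPI u, MPI v, MPI m, MPI result)
{
	mpi_barrett_t ctx;

	/* Second argument 0: M is borrowed and must not change while the
	 * context is alive; pass 1 to have it copied instead.
	 */
	ctx = mpi_barrett_init(m, 0);
	if (!ctx)
		return;

	mpi_mul_barrett(result, u, v, ctx);	/* result = (u * v) mod m */

	mpi_barrett_free(ctx);
}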
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
new file mode 100644
index 000000000000..7f4eda8560dc
--- /dev/null
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -0,0 +1,92 @@
+/* mpi-mul.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+void mpi_mul(MPI w, MPI u, MPI v)
+{
+ mpi_size_t usize, vsize, wsize;
+ mpi_ptr_t up, vp, wp;
+ mpi_limb_t cy;
+ int usign, vsign, sign_product;
+ int assign_wp = 0;
+ mpi_ptr_t tmp_limb = NULL;
+
+ if (u->nlimbs < v->nlimbs) {
+ /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ up = v->d;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ up = u->d;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ vp = v->d;
+ }
+ sign_product = usign ^ vsign;
+ wp = w->d;
+
+ /* Ensure W has space enough to store the result. */
+ wsize = usize + vsize;
+ if (w->alloced < wsize) {
+ if (wp == up || wp == vp) {
+ wp = mpi_alloc_limb_space(wsize);
+ assign_wp = 1;
+ } else {
+ mpi_resize(w, wsize);
+ wp = w->d;
+ }
+ } else { /* Make U and V not overlap with W. */
+ if (wp == up) {
+ /* W and U are identical. Allocate temporary space for U. */
+ up = tmp_limb = mpi_alloc_limb_space(usize);
+ /* Is V identical too? Keep it identical with U. */
+ if (wp == vp)
+ vp = up;
+ /* Copy to the temporary space. */
+ MPN_COPY(up, wp, usize);
+ } else if (wp == vp) {
+ /* W and V are identical. Allocate temporary space for V. */
+ vp = tmp_limb = mpi_alloc_limb_space(vsize);
+ /* Copy to the temporary space. */
+ MPN_COPY(vp, wp, vsize);
+ }
+ }
+
+ if (!vsize)
+ wsize = 0;
+ else {
+ mpihelp_mul(wp, up, usize, vp, vsize, &cy);
+ wsize -= cy ? 0:1;
+ }
+
+ if (assign_wp)
+ mpi_assign_limb_space(w, wp, wsize);
+ w->nlimbs = wsize;
+ w->sign = sign_product;
+ if (tmp_limb)
+ mpi_free_limb_space(tmp_limb);
+}
+EXPORT_SYMBOL_GPL(mpi_mul);
+
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_mul(w, u, v);
+ mpi_tdiv_r(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_mulm);
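Note that mpi_mulm() reduces with the truncating mpi_tdiv_r(), whereas mpi_addm()/mpi_subm() go through mpi_mod() (floor semantics); for non-negative operands the two agree. A minimal sketch, illustrative only and not part of this patch:

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static void example_mod_mul(MPI u, MPI v, MPI m, MPI result)
{
	/* result = (u * v) mod m, with truncating reduction; callers that
	 * need a canonical non-negative residue for signed inputs can
	 * reduce again with mpi_mod().
	 */
	mpi_mulm(result, u, v, m);
}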
diff --git a/lib/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c
index 2fd7a46d55ec..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/crypto/mpi/mpi-pow.c
diff --git a/lib/crypto/mpi/mpi-sub-ui.c b/lib/crypto/mpi/mpi-sub-ui.c
new file mode 100644
index 000000000000..b41b082b5f3e
--- /dev/null
+++ b/lib/crypto/mpi/mpi-sub-ui.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
+ *
+ * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
+ * Free Software Foundation, Inc.
+ *
+ * This file was based on the GNU MP Library source file:
+ * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
+ *
+ * The GNU MP Library is free software; you can redistribute it and/or modify
+ * it under the terms of either:
+ *
+ * * the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your
+ * option) any later version.
+ *
+ * or
+ *
+ * * the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * or both in parallel, as here.
+ *
+ * The GNU MP Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received copies of the GNU General Public License and the
+ * GNU Lesser General Public License along with the GNU MP Library. If not,
+ * see https://www.gnu.org/licenses/.
+ */
+
+#include "mpi-internal.h"
+
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
+{
+ if (u->nlimbs == 0) {
+ if (mpi_resize(w, 1) < 0)
+ return -ENOMEM;
+ w->d[0] = vval;
+ w->nlimbs = (vval != 0);
+ w->sign = (vval != 0);
+ return 0;
+ }
+
+ /* If not space for W (and possible carry), increase space. */
+ if (mpi_resize(w, u->nlimbs + 1))
+ return -ENOMEM;
+
+ if (u->sign) {
+ mpi_limb_t cy;
+
+ cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ w->d[u->nlimbs] = cy;
+ w->nlimbs = u->nlimbs + cy;
+ w->sign = 1;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (u->nlimbs == 1 && u->d[0] < vval) {
+ w->d[0] = vval - u->d[0];
+ w->nlimbs = 1;
+ w->sign = 1;
+ } else {
+ mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ /* Size can decrease with at most one limb. */
+ w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
+ w->sign = 0;
+ }
+ }
+
+ mpi_normalize(w);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_sub_ui);
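Unlike most of the MPI helpers, mpi_sub_ui() reports allocation failure instead of relying on a pre-sized result, so callers are expected to check its return value. Illustrative sketch, not part of this patch:

#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static int example_decrement(MPI counter)
{
	int ret;

	ret = mpi_sub_ui(counter, counter, 1);	/* counter = counter - 1 */
	if (ret)
		return ret;	/* -ENOMEM if the result could not be resized */

	return mpi_cmp_ui(counter, 0) ? 0 : 1;	/* 1 when the counter hit zero */
}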
diff --git a/lib/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index eead4b339466..3cb6bd148fa9 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include "mpi-internal.h"
+#define MAX_EXTERN_SCAN_BYTES (16*1024*1024)
#define MAX_EXTERN_MPI_BITS 16384
/**
@@ -109,6 +110,112 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+/****************
+ * Fill the mpi VAL from the hex string in STR.
+ */
+int mpi_fromstr(MPI val, const char *str)
+{
+ int sign = 0;
+ int prepend_zero = 0;
+ int i, j, c, c1, c2;
+ unsigned int nbits, nbytes, nlimbs;
+ mpi_limb_t a;
+
+ if (*str == '-') {
+ sign = 1;
+ str++;
+ }
+
+ /* Skip optional hex prefix. */
+ if (*str == '0' && str[1] == 'x')
+ str += 2;
+
+ nbits = strlen(str);
+ if (nbits > MAX_EXTERN_SCAN_BYTES) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ nbits *= 4;
+ if ((nbits % 8))
+ prepend_zero = 1;
+
+ nbytes = (nbits+7) / 8;
+ nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
+
+ if (val->alloced < nlimbs)
+ mpi_resize(val, nlimbs);
+
+ i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB);
+ i %= BYTES_PER_MPI_LIMB;
+ j = val->nlimbs = nlimbs;
+ val->sign = sign;
+ for (; j > 0; j--) {
+ a = 0;
+ for (; i < BYTES_PER_MPI_LIMB; i++) {
+ if (prepend_zero) {
+ c1 = '0';
+ prepend_zero = 0;
+ } else
+ c1 = *str++;
+
+ if (!c1) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c2 = *str++;
+ if (!c2) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ if (c1 >= '0' && c1 <= '9')
+ c = c1 - '0';
+ else if (c1 >= 'a' && c1 <= 'f')
+ c = c1 - 'a' + 10;
+ else if (c1 >= 'A' && c1 <= 'F')
+ c = c1 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c <<= 4;
+ if (c2 >= '0' && c2 <= '9')
+ c |= c2 - '0';
+ else if (c2 >= 'a' && c2 <= 'f')
+ c |= c2 - 'a' + 10;
+ else if (c2 >= 'A' && c2 <= 'F')
+ c |= c2 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ a <<= 8;
+ a |= c;
+ }
+ i = 0;
+ val->d[j-1] = a;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_fromstr);
+
+MPI mpi_scanval(const char *string)
+{
+ MPI a;
+
+ a = mpi_alloc(0);
+ if (!a)
+ return NULL;
+
+ if (mpi_fromstr(a, string)) {
+ mpi_free(a);
+ return NULL;
+ }
+ mpi_normalize(a);
+ return a;
+}
+EXPORT_SYMBOL_GPL(mpi_scanval);
+
static int count_lzeros(MPI a)
{
mpi_limb_t alimb;
@@ -127,11 +234,11 @@ static int count_lzeros(MPI a)
}
/**
- * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
* @a: a multi precision integer
- * @buf: bufer to which the output will be written to. Needs to be at
- * leaset mpi_get_size(a) long.
+ * @buf: buffer to which the output will be written to. Needs to be at
+ * least mpi_get_size(a) long.
* @buf_len: size of the buf.
* @nbytes: receives the actual length of the data written on success and
* the data to-be-written on -EOVERFLOW in case buf_len was too
@@ -397,7 +504,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
while (sg_miter_next(&miter)) {
buff = miter.addr;
- len = miter.length;
+ len = min_t(unsigned, miter.length, nbytes);
+ nbytes -= len;
for (x = 0; x < len; x++) {
a <<= 8;
@@ -413,3 +521,232 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
+
+/* Perform a two's complement operation on buffer P of size N bytes. */
+static void twocompl(unsigned char *p, unsigned int n)
+{
+ int i;
+
+ for (i = n-1; i >= 0 && !p[i]; i--)
+ ;
+ if (i >= 0) {
+ if ((p[i] & 0x01))
+ p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff);
+ else if ((p[i] & 0x02))
+ p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe);
+ else if ((p[i] & 0x04))
+ p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc);
+ else if ((p[i] & 0x08))
+ p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8);
+ else if ((p[i] & 0x10))
+ p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0);
+ else if ((p[i] & 0x20))
+ p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0);
+ else if ((p[i] & 0x40))
+ p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0);
+ else
+ p[i] = 0x80;
+
+ for (i--; i >= 0; i--)
+ p[i] ^= 0xff;
+ }
+}
+
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a)
+{
+ unsigned int nbits = mpi_get_nbits(a);
+ size_t len;
+ size_t dummy_nwritten;
+ int negative;
+
+ if (!nwritten)
+ nwritten = &dummy_nwritten;
+
+ /* Libgcrypt does not always care to clear the sign if the value
+ * is 0. For printing this is a bit of a surprise, in particular
+ * because some of the formats don't support negative numbers but
+ * should still be able to print a zero. Thus we need this extra test
+ * for a negative number.
+ */
+ if (a->sign && mpi_cmp_ui(a, 0))
+ negative = 1;
+ else
+ negative = 0;
+
+ len = buflen;
+ *nwritten = 0;
+ if (format == GCRYMPI_FMT_STD) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ /* Positive but the high bit of the returned buffer is set.
+ * Thus we need to print an extra leading 0x00 so that the
+ * output is interpreted as a positive number.
+ */
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n > len) {
+ /* The provided buffer is too short. */
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_USG) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* Note: We ignore the sign for this format. */
+ /* FIXME: for performance reasons we should put this into
+ * mpi_aprint because we can then use the buffer directly.
+ */
+
+ if (buffer && n > len)
+ return -E2BIG;
+ if (buffer) {
+ unsigned char *tmp;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(buffer, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_PGP) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* The PGP format can only handle unsigned integers. */
+ if (negative)
+ return -EINVAL;
+
+ if (buffer && n+2 > len)
+ return -E2BIG;
+
+ if (buffer) {
+ unsigned char *tmp;
+ unsigned char *s = buffer;
+
+ s[0] = nbits >> 8;
+ s[1] = nbits;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(s+2, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n+2;
+ return 0;
+ } else if (format == GCRYMPI_FMT_SSH) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n+4 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ *s++ = n >> 24;
+ *s++ = n >> 16;
+ *s++ = n >> 8;
+ *s++ = n;
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = 4+n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_HEX) {
+ unsigned char *tmp;
+ int i;
+ int extra = 0;
+ unsigned int n = 0;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ if (!n || (*tmp & 0x80))
+ extra = 2;
+
+ if (buffer && 2*n + extra + negative + 1 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (negative)
+ *s++ = '-';
+ if (extra) {
+ *s++ = '0';
+ *s++ = '0';
+ }
+
+ for (i = 0; i < n; i++) {
+ unsigned int c = tmp[i];
+
+ *s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10;
+ c &= 15;
+ *s++ = c < 10 ? '0'+c : 'A'+c-10;
+ }
+ *s++ = 0;
+ *nwritten = s - buffer;
+ } else {
+ *nwritten = 2*n + extra + negative + 1;
+ }
+ kfree(tmp);
+ return 0;
+ } else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mpi_print);
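A round-trip sketch (illustrative only, not part of this patch): parse a hex string into an MPI with mpi_scanval() and serialise it again in the unsigned big-endian GCRYMPI_FMT_USG format; the test value is hypothetical.

#include <linux/errno.h>
#include <linux/mpi.h>

/* Illustrative sketch, not part of this patch. */
static int example_hex_roundtrip(void)
{
	unsigned char buf[32];
	size_t nwritten;
	MPI a;
	int ret;

	a = mpi_scanval("0x0123456789abcdef");	/* hypothetical test value */
	if (!a)
		return -EINVAL;

	/* Writes (nbits + 7) / 8 bytes, most significant byte first. */
	ret = mpi_print(GCRYMPI_FMT_USG, buf, sizeof(buf), &nwritten, a);

	mpi_free(a);
	return ret;
}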
diff --git a/lib/mpi/mpih-cmp.c b/lib/crypto/mpi/mpih-cmp.c
index f23709114a65..f23709114a65 100644
--- a/lib/mpi/mpih-cmp.c
+++ b/lib/crypto/mpi/mpih-cmp.c
diff --git a/lib/crypto/mpi/mpih-div.c b/lib/crypto/mpi/mpih-div.c
new file mode 100644
index 000000000000..be70ee2e42d3
--- /dev/null
+++ b/lib/crypto/mpi/mpih-div.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-div.c - MPI helper functions
+ * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
+
+
+mpi_limb_t
+mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ /* Botch: Should this be handled at all? Rely on callers? */
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
+
+/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
+ * the NSIZE-DSIZE least significant quotient limbs at QP
+ * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
+ * non-zero, generate that many fraction bits and append them after the
+ * other quotient limbs.
+ * Return the most significant limb of the quotient, this is always 0 or 1.
+ *
+ * Preconditions:
+ * 0. NSIZE >= DSIZE.
+ * 1. The most significant bit of the divisor must be set.
+ * 2. QP must either not overlap with the input operands at all, or
+ * QP + DSIZE >= NP must hold true. (This means that it's
+ * possible to put the quotient in the high part of NUM, right after the
+ * remainder in NUM.)
+ * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
+ */
+
+mpi_limb_t
+mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
+ mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
+{
+ mpi_limb_t most_significant_q_limb = 0;
+
+ switch (dsize) {
+ case 0:
+ /* We are asked to divide by zero, so go ahead and do it! (To make
+ the compiler not remove this statement, return the value.) */
+ /*
+ * existing clients of this function have been modified
+ * not to call it with dsize == 0, so this should not happen
+ */
+ return 1 / dsize;
+
+ case 1:
+ {
+ mpi_size_t i;
+ mpi_limb_t n1;
+ mpi_limb_t d;
+
+ d = dp[0];
+ n1 = np[nsize - 1];
+
+ if (n1 >= d) {
+ n1 -= d;
+ most_significant_q_limb = 1;
+ }
+
+ qp += qextra_limbs;
+ for (i = nsize - 2; i >= 0; i--)
+ udiv_qrnnd(qp[i], n1, n1, np[i], d);
+ qp -= qextra_limbs;
+
+ for (i = qextra_limbs - 1; i >= 0; i--)
+ udiv_qrnnd(qp[i], n1, n1, 0, d);
+
+ np[0] = n1;
+ }
+ break;
+
+ case 2:
+ {
+ mpi_size_t i;
+ mpi_limb_t n1, n0, n2;
+ mpi_limb_t d1, d0;
+
+ np += nsize - 2;
+ d1 = dp[1];
+ d0 = dp[0];
+ n1 = np[1];
+ n0 = np[0];
+
+ if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
+ sub_ddmmss(n1, n0, n1, n0, d1, d0);
+ most_significant_q_limb = 1;
+ }
+
+ for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
+ mpi_limb_t q;
+ mpi_limb_t r;
+
+ if (i >= qextra_limbs)
+ np--;
+ else
+ np[0] = 0;
+
+ if (n1 == d1) {
+ /* Q should be either 111..111 or 111..110. Need special
+ * treatment of this rare case as normal division would
+ * give overflow. */
+ q = ~(mpi_limb_t) 0;
+
+ r = n0 + d1;
+ if (r < d1) { /* Carry in the addition? */
+ add_ssaaaa(n1, n0, r - d0,
+ np[0], 0, d0);
+ qp[i] = q;
+ continue;
+ }
+ n1 = d0 - (d0 != 0 ? 1 : 0);
+ n0 = -d0;
+ } else {
+ udiv_qrnnd(q, r, n1, n0, d1);
+ umul_ppmm(n1, n0, d0, q);
+ }
+
+ n2 = np[0];
+q_test:
+ if (n1 > r || (n1 == r && n0 > n2)) {
+ /* The estimated Q was too large. */
+ q--;
+ sub_ddmmss(n1, n0, n1, n0, 0, d0);
+ r += d1;
+ if (r >= d1) /* If not carry, test Q again. */
+ goto q_test;
+ }
+
+ qp[i] = q;
+ sub_ddmmss(n1, n0, r, n2, n1, n0);
+ }
+ np[1] = n1;
+ np[0] = n0;
+ }
+ break;
+
+ default:
+ {
+ mpi_size_t i;
+ mpi_limb_t dX, d1, n0;
+
+ np += nsize - dsize;
+ dX = dp[dsize - 1];
+ d1 = dp[dsize - 2];
+ n0 = np[dsize - 1];
+
+ if (n0 >= dX) {
+ if (n0 > dX
+ || mpihelp_cmp(np, dp, dsize - 1) >= 0) {
+ mpihelp_sub_n(np, np, dp, dsize);
+ n0 = np[dsize - 1];
+ most_significant_q_limb = 1;
+ }
+ }
+
+ for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
+ mpi_limb_t q;
+ mpi_limb_t n1, n2;
+ mpi_limb_t cy_limb;
+
+ if (i >= qextra_limbs) {
+ np--;
+ n2 = np[dsize];
+ } else {
+ n2 = np[dsize - 1];
+ MPN_COPY_DECR(np + 1, np, dsize - 1);
+ np[0] = 0;
+ }
+
+ if (n0 == dX) {
+ /* This might over-estimate q, but it's probably not worth
+ * the extra code here to find out. */
+ q = ~(mpi_limb_t) 0;
+ } else {
+ mpi_limb_t r;
+
+ udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
+ umul_ppmm(n1, n0, d1, q);
+
+ while (n1 > r
+ || (n1 == r
+ && n0 > np[dsize - 2])) {
+ q--;
+ r += dX;
+ if (r < dX) /* I.e. "carry in previous addition?" */
+ break;
+ n1 -= n0 < d1;
+ n0 -= d1;
+ }
+ }
+
+ /* Possible optimization: We already have (q * n0) and (1 * n1)
+ * after the calculation of q. Taking advantage of that, we
+ * could save two iterations of this loop. */
+ cy_limb = mpihelp_submul_1(np, dp, dsize, q);
+
+ if (n2 != cy_limb) {
+ mpihelp_add_n(np, np, dp, dsize);
+ q--;
+ }
+
+ qp[i] = q;
+ n0 = np[dsize - 1];
+ }
+ }
+ }
+
+ return most_significant_q_limb;
+}
+
+/****************
+ * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
+ * Return the single-limb remainder.
+ * There are no constraints on the value of the divisor.
+ *
+ * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
+ */
+
+mpi_limb_t
+mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t) 0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i], r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
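
As a plain-C illustration (not part of the patch) of what the simple,
non-pre-inverted path of mpihelp_mod_1() computes: the remainder is folded
in one limb at a time, r = (r * 2^N + limb) mod d, scanning from the most
significant limb down. The sketch below uses hypothetical 32-bit "limbs"
and a 64-bit intermediate so it runs anywhere; d must be non-zero.

#include <stddef.h>
#include <stdint.h>

/* limbs[] is least-significant-limb first, like the mpi_ptr_t arrays above */
static uint32_t mod_1_sketch(const uint32_t *limbs, size_t n, uint32_t d)
{
	uint64_t r = 0;
	size_t i;

	for (i = n; i-- > 0; )
		r = ((r << 32) | limbs[i]) % d;

	return (uint32_t)r;
}
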
diff --git a/lib/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
index a93647564054..e5f1c84e3c48 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/crypto/mpi/mpih-mul.c
@@ -317,6 +317,31 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
}
}
+
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
+{
+ if (up == vp) {
+ if (size < KARATSUBA_THRESHOLD)
+ mpih_sqr_n_basecase(prodp, up, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mpih_sqr_n(prodp, up, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ } else {
+ if (size < KARATSUBA_THRESHOLD)
+ mul_n_basecase(prodp, up, vp, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mul_n(prodp, up, vp, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ }
+}
+
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
new file mode 100644
index 000000000000..aa8c46544af8
--- /dev/null
+++ b/lib/crypto/mpi/mpiutil.c
@@ -0,0 +1,330 @@
+/* mpiutil.c - Utility functions for MPI
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+
+/* Constants allocated right away at startup. */
+static MPI constants[MPI_NUMBER_OF_CONSTANTS];
+
+/* Initialize the MPI subsystem. This is called early and allows us to
+ * do some initialization without having to care about threading issues.
+ */
+static int __init mpi_init(void)
+{
+ int idx;
+ unsigned long value;
+
+ for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) {
+ switch (idx) {
+ case MPI_C_ZERO:
+ value = 0;
+ break;
+ case MPI_C_ONE:
+ value = 1;
+ break;
+ case MPI_C_TWO:
+ value = 2;
+ break;
+ case MPI_C_THREE:
+ value = 3;
+ break;
+ case MPI_C_FOUR:
+ value = 4;
+ break;
+ case MPI_C_EIGHT:
+ value = 8;
+ break;
+ default:
+ pr_err("MPI: invalid mpi_const selector %d\n", idx);
+ return -EFAULT;
+ }
+ constants[idx] = mpi_alloc_set_ui(value);
+ constants[idx]->flags = (16|32);
+ }
+
+ return 0;
+}
+postcore_initcall(mpi_init);
+
+/* Return a constant MPI described by NO which is one of the
+ * MPI_C_xxx macros. There is no need to copy this returned value; it
+ * may be used directly.
+ */
+MPI mpi_const(enum gcry_mpi_constants no)
+{
+ if ((int)no < 0 || no >= MPI_NUMBER_OF_CONSTANTS)
+ pr_err("MPI: invalid mpi_const selector %d\n", no);
+ if (!constants[no])
+ pr_err("MPI: MPI subsystem not initialized\n");
+ return constants[no];
+}
+EXPORT_SYMBOL_GPL(mpi_const);
+
+/****************
+ * Note: It was a bad idea to use the number of limbs to allocate
+ * because on an alpha the limbs are large but we normally need
+ * integers of n bits - so we should change this to bits (or bytes).
+ *
+ * But mpi_alloc is used in a lot of places :-)
+ */
+MPI mpi_alloc(unsigned nlimbs)
+{
+ MPI a;
+
+ a = kmalloc(sizeof *a, GFP_KERNEL);
+ if (!a)
+ return a;
+
+ if (nlimbs) {
+ a->d = mpi_alloc_limb_space(nlimbs);
+ if (!a->d) {
+ kfree(a);
+ return NULL;
+ }
+ } else {
+ a->d = NULL;
+ }
+
+ a->alloced = nlimbs;
+ a->nlimbs = 0;
+ a->sign = 0;
+ a->flags = 0;
+ a->nbits = 0;
+ return a;
+}
+EXPORT_SYMBOL_GPL(mpi_alloc);
+
+mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
+{
+ size_t len = nlimbs * sizeof(mpi_limb_t);
+
+ if (!len)
+ return NULL;
+
+ return kmalloc(len, GFP_KERNEL);
+}
+
+void mpi_free_limb_space(mpi_ptr_t a)
+{
+ if (!a)
+ return;
+
+ kfree_sensitive(a);
+}
+
+void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
+{
+ mpi_free_limb_space(a->d);
+ a->d = ap;
+ a->alloced = nlimbs;
+}
+
+/****************
+ * Resize the array of A to NLIMBS. The additional space is cleared
+ * (set to 0) by the kcalloc() used below.
+ */
+int mpi_resize(MPI a, unsigned nlimbs)
+{
+ void *p;
+
+ if (nlimbs <= a->alloced)
+ return 0; /* no need to do it */
+
+ if (a->d) {
+ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
+ kfree_sensitive(a->d);
+ a->d = p;
+ } else {
+ a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ if (!a->d)
+ return -ENOMEM;
+ }
+ a->alloced = nlimbs;
+ return 0;
+}
+
+void mpi_clear(MPI a)
+{
+ if (!a)
+ return;
+ a->nlimbs = 0;
+ a->flags = 0;
+}
+EXPORT_SYMBOL_GPL(mpi_clear);
+
+void mpi_free(MPI a)
+{
+ if (!a)
+ return;
+
+ if (a->flags & 4)
+ kfree_sensitive(a->d);
+ else
+ mpi_free_limb_space(a->d);
+
+ if (a->flags & ~7)
+ pr_info("invalid flag value in mpi\n");
+ kfree(a);
+}
+EXPORT_SYMBOL_GPL(mpi_free);
+
+/****************
+ * Note: This copy function should not interpret the MPI
+ * but copy it transparently.
+ */
+MPI mpi_copy(MPI a)
+{
+ int i;
+ MPI b;
+
+ if (a) {
+ b = mpi_alloc(a->nlimbs);
+ b->nlimbs = a->nlimbs;
+ b->sign = a->sign;
+ b->flags = a->flags;
+ b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ for (i = 0; i < b->nlimbs; i++)
+ b->d[i] = a->d[i];
+ } else
+ b = NULL;
+ return b;
+}
+
+/****************
+ * This function allocates an MPI which is optimized to hold
+ * a value as large as the one given in the argument and allocates it
+ * with the same flags as A.
+ */
+MPI mpi_alloc_like(MPI a)
+{
+ MPI b;
+
+ if (a) {
+ b = mpi_alloc(a->nlimbs);
+ b->nlimbs = 0;
+ b->sign = 0;
+ b->flags = a->flags;
+ } else
+ b = NULL;
+
+ return b;
+}
+
+
+/* Set U into W and release U. If W is NULL only U will be released. */
+void mpi_snatch(MPI w, MPI u)
+{
+ if (w) {
+ mpi_assign_limb_space(w, u->d, u->alloced);
+ w->nlimbs = u->nlimbs;
+ w->sign = u->sign;
+ w->flags = u->flags;
+ u->alloced = 0;
+ u->nlimbs = 0;
+ u->d = NULL;
+ }
+ mpi_free(u);
+}
+
+
+MPI mpi_set(MPI w, MPI u)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize = u->nlimbs;
+ int usign = u->sign;
+
+ if (!w)
+ w = mpi_alloc(mpi_get_nlimbs(u));
+ RESIZE_IF_NEEDED(w, usize);
+ wp = w->d;
+ up = u->d;
+ MPN_COPY(wp, up, usize);
+ w->nlimbs = usize;
+ w->flags = u->flags;
+ w->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ w->sign = usign;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set);
+
+MPI mpi_set_ui(MPI w, unsigned long u)
+{
+ if (!w)
+ w = mpi_alloc(1);
+ /* FIXME: If U is 0 we have no need to resize and thus possibly
+ * allocate the limbs.
+ */
+ RESIZE_IF_NEEDED(w, 1);
+ w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ w->flags = 0;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set_ui);
+
+MPI mpi_alloc_set_ui(unsigned long u)
+{
+ MPI w = mpi_alloc(1);
+ w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ return w;
+}
+
+/****************
+ * Swap the values of A and B when SWAP is 1.
+ * Leave the values unchanged when SWAP is 0.
+ * This implementation should be constant-time regardless of SWAP.
+ */
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap)
+{
+ mpi_size_t i;
+ mpi_size_t nlimbs;
+ mpi_limb_t mask = ((mpi_limb_t)0) - swap;
+ mpi_limb_t x;
+
+ if (a->alloced > b->alloced)
+ nlimbs = b->alloced;
+ else
+ nlimbs = a->alloced;
+ if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
+ return;
+
+ for (i = 0; i < nlimbs; i++) {
+ x = mask & (a->d[i] ^ b->d[i]);
+ a->d[i] = a->d[i] ^ x;
+ b->d[i] = b->d[i] ^ x;
+ }
+
+ x = mask & (a->nlimbs ^ b->nlimbs);
+ a->nlimbs = a->nlimbs ^ x;
+ b->nlimbs = b->nlimbs ^ x;
+
+ x = mask & (a->sign ^ b->sign);
+ a->sign = a->sign ^ x;
+ b->sign = b->sign ^ x;
+}
+
+MODULE_DESCRIPTION("Multiprecision maths library");
+MODULE_LICENSE("GPL");
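
For orientation, a minimal lifecycle sketch (illustration only, not part of
the patch) using the helpers added above; <linux/mpi.h> and the usual errno
definitions are assumed.

static int example_mpi_lifecycle(void)
{
	MPI a, b;

	a = mpi_alloc(0);		/* empty value, limbs allocated on demand */
	if (!a)
		return -ENOMEM;
	mpi_set_ui(a, 42);		/* a = 42 */

	b = mpi_copy(a);		/* independent copy of a */
	if (!b) {
		mpi_free(a);
		return -ENOMEM;
	}

	mpi_swap_cond(a, b, 1);		/* constant-time swap, here unconditionally */

	mpi_free(b);
	mpi_free(a);
	return 0;
}
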
diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
new file mode 100644
index 000000000000..7fb71845cc84
--- /dev/null
+++ b/lib/crypto/poly1305-donna32.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
+ key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03;
+ key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff;
+ key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff;
+ key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff;
+
+ /* s = 5*r */
+ key->precomputed_s.r[0] = key->key.r[1] * 5;
+ key->precomputed_s.r[1] = key->key.r[2] * 5;
+ key->precomputed_s.r[2] = key->key.r[3] * 5;
+ key->precomputed_s.r[3] = key->key.r[4] * 5;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+ u32 c;
+
+ if (!nblocks)
+ return;
+
+ hibit <<= 24;
+
+ r0 = key->key.r[0];
+ r1 = key->key.r[1];
+ r2 = key->key.r[2];
+ r3 = key->key.r[3];
+ r4 = key->key.r[4];
+
+ s1 = key->precomputed_s.r[0];
+ s2 = key->precomputed_s.r[1];
+ s3 = key->precomputed_s.r[2];
+ s4 = key->precomputed_s.r[3];
+
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ do {
+ /* h += m[i] */
+ h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff;
+ h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = ((u64)h0 * r0) + ((u64)h1 * s4) +
+ ((u64)h2 * s3) + ((u64)h3 * s2) +
+ ((u64)h4 * s1);
+ d1 = ((u64)h0 * r1) + ((u64)h1 * r0) +
+ ((u64)h2 * s4) + ((u64)h3 * s3) +
+ ((u64)h4 * s2);
+ d2 = ((u64)h0 * r2) + ((u64)h1 * r1) +
+ ((u64)h2 * r0) + ((u64)h3 * s4) +
+ ((u64)h4 * s3);
+ d3 = ((u64)h0 * r3) + ((u64)h1 * r2) +
+ ((u64)h2 * r1) + ((u64)h3 * r0) +
+ ((u64)h4 * s4);
+ d4 = ((u64)h0 * r4) + ((u64)h1 * r3) +
+ ((u64)h2 * r2) + ((u64)h3 * r1) +
+ ((u64)h4 * r0);
+
+ /* (partial) h %= p */
+ c = (u32)(d0 >> 26);
+ h0 = (u32)d0 & 0x3ffffff;
+ d1 += c;
+ c = (u32)(d1 >> 26);
+ h1 = (u32)d1 & 0x3ffffff;
+ d2 += c;
+ c = (u32)(d2 >> 26);
+ h2 = (u32)d2 & 0x3ffffff;
+ d3 += c;
+ c = (u32)(d3 >> 26);
+ h3 = (u32)d3 & 0x3ffffff;
+ d4 += c;
+ c = (u32)(d4 >> 26);
+ h4 = (u32)d4 & 0x3ffffff;
+ h0 += c * 5;
+ c = (h0 >> 26);
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h[0] = h0;
+ state->h[1] = h1;
+ state->h[2] = h2;
+ state->h[3] = h3;
+ state->h[4] = h4;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u32 h0, h1, h2, h3, h4, c;
+ u32 g0, g1, g2, g3, g4;
+ u64 f;
+ u32 mask;
+
+ /* fully carry h */
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ c = h1 >> 26;
+ h1 = h1 & 0x3ffffff;
+ h2 += c;
+ c = h2 >> 26;
+ h2 = h2 & 0x3ffffff;
+ h3 += c;
+ c = h3 >> 26;
+ h3 = h3 & 0x3ffffff;
+ h4 += c;
+ c = h4 >> 26;
+ h4 = h4 & 0x3ffffff;
+ h0 += c * 5;
+ c = h0 >> 26;
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 26;
+ g0 &= 0x3ffffff;
+ g1 = h1 + c;
+ c = g1 >> 26;
+ g1 &= 0x3ffffff;
+ g2 = h2 + c;
+ c = g2 >> 26;
+ g2 &= 0x3ffffff;
+ g3 = h3 + c;
+ c = g3 >> 26;
+ g3 &= 0x3ffffff;
+ g4 = h4 + c - (1UL << 26);
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = ((h0) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
+ if (likely(nonce)) {
+ /* mac = (h + nonce) % (2^128) */
+ f = (u64)h0 + nonce[0];
+ h0 = (u32)f;
+ f = (u64)h1 + nonce[1] + (f >> 32);
+ h1 = (u32)f;
+ f = (u64)h2 + nonce[2] + (f >> 32);
+ h2 = (u32)f;
+ f = (u64)h3 + nonce[3] + (f >> 32);
+ h3 = (u32)f;
+ }
+
+ put_unaligned_le32(h0, &mac[0]);
+ put_unaligned_le32(h1, &mac[4]);
+ put_unaligned_le32(h2, &mac[8]);
+ put_unaligned_le32(h3, &mac[12]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
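
A small portable sketch (illustration only) of how the loads in
poly1305_core_blocks() above carve a 128-bit little-endian block into five
26-bit limbs: 32-bit reads at byte offsets 0, 3, 6, 9 and 12, shifted by
0, 2, 4, 6 and 8 bits respectively. memcpy() stands in for the kernel's
get_unaligned_le32(), and a little-endian host is assumed.

#include <stdint.h>
#include <string.h>

static uint32_t le32_at(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* unaligned load; little-endian host assumed */
	return v;
}

static void load_limbs26(const uint8_t in[16], uint32_t h[5])
{
	h[0] = (le32_at(in + 0) >> 0) & 0x3ffffff;
	h[1] = (le32_at(in + 3) >> 2) & 0x3ffffff;
	h[2] = (le32_at(in + 6) >> 4) & 0x3ffffff;
	h[3] = (le32_at(in + 9) >> 6) & 0x3ffffff;
	h[4] = (le32_at(in + 12) >> 8);	/* 24 bits; the hibit is OR'ed in later */
}
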
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
new file mode 100644
index 000000000000..988702c9b3b2
--- /dev/null
+++ b/lib/crypto/poly1305-donna64.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ u64 t0, t1;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ t0 = get_unaligned_le64(&raw_key[0]);
+ t1 = get_unaligned_le64(&raw_key[8]);
+
+ key->key.r64[0] = t0 & 0xffc0fffffffULL;
+ key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL;
+ key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL;
+
+ /* s = 20*r */
+ key->precomputed_s.r64[0] = key->key.r64[1] * 20;
+ key->precomputed_s.r64[1] = key->key.r64[2] * 20;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u64 hibit64;
+ u64 r0, r1, r2;
+ u64 s1, s2;
+ u64 h0, h1, h2;
+ u64 c;
+ u128 d0, d1, d2, d;
+
+ if (!nblocks)
+ return;
+
+ hibit64 = ((u64)hibit) << 40;
+
+ r0 = key->key.r64[0];
+ r1 = key->key.r64[1];
+ r2 = key->key.r64[2];
+
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ s1 = key->precomputed_s.r64[0];
+ s2 = key->precomputed_s.r64[1];
+
+ do {
+ u64 t0, t1;
+
+ /* h += m[i] */
+ t0 = get_unaligned_le64(&input[0]);
+ t1 = get_unaligned_le64(&input[8]);
+
+ h0 += t0 & 0xfffffffffffULL;
+ h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64;
+
+ /* h *= r */
+ d0 = (u128)h0 * r0;
+ d = (u128)h1 * s2;
+ d0 += d;
+ d = (u128)h2 * s1;
+ d0 += d;
+ d1 = (u128)h0 * r1;
+ d = (u128)h1 * r0;
+ d1 += d;
+ d = (u128)h2 * s2;
+ d1 += d;
+ d2 = (u128)h0 * r2;
+ d = (u128)h1 * r1;
+ d2 += d;
+ d = (u128)h2 * r0;
+ d2 += d;
+
+ /* (partial) h %= p */
+ c = (u64)(d0 >> 44);
+ h0 = (u64)d0 & 0xfffffffffffULL;
+ d1 += c;
+ c = (u64)(d1 >> 44);
+ h1 = (u64)d1 & 0xfffffffffffULL;
+ d2 += c;
+ c = (u64)(d2 >> 42);
+ h2 = (u64)d2 & 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 = h0 & 0xfffffffffffULL;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h64[0] = h0;
+ state->h64[1] = h1;
+ state->h64[2] = h2;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u64 h0, h1, h2, c;
+ u64 g0, g1, g2;
+ u64 t0, t1;
+
+ /* fully carry h */
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 44;
+ g0 &= 0xfffffffffffULL;
+ g1 = h1 + c;
+ c = g1 >> 44;
+ g1 &= 0xfffffffffffULL;
+ g2 = h2 + c - (1ULL << 42);
+
+ /* select h if h < p, or h + -p if h >= p */
+ c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
+ g0 &= c;
+ g1 &= c;
+ g2 &= c;
+ c = ~c;
+ h0 = (h0 & c) | g0;
+ h1 = (h1 & c) | g1;
+ h2 = (h2 & c) | g2;
+
+ if (likely(nonce)) {
+ /* h = (h + nonce) */
+ t0 = ((u64)nonce[1] << 32) | nonce[0];
+ t1 = ((u64)nonce[3] << 32) | nonce[2];
+
+ h0 += t0 & 0xfffffffffffULL;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c;
+ h2 &= 0x3ffffffffffULL;
+ }
+
+ /* mac = h % (2^128) */
+ h0 = h0 | (h1 << 44);
+ h1 = (h1 >> 20) | (h2 << 24);
+
+ put_unaligned_le64(h0, &mac[0]);
+ put_unaligned_le64(h1, &mac[8]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
new file mode 100644
index 000000000000..26d87fc3823e
--- /dev/null
+++ b/lib/crypto/poly1305.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Poly1305 authenticator algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
+ */
+
+#include <crypto/internal/poly1305.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE])
+{
+ poly1305_core_setkey(&desc->core_r, key);
+ desc->s[0] = get_unaligned_le32(key + 16);
+ desc->s[1] = get_unaligned_le32(key + 20);
+ desc->s[2] = get_unaligned_le32(key + 24);
+ desc->s[3] = get_unaligned_le32(key + 28);
+ poly1305_core_init(&desc->h);
+ desc->buflen = 0;
+ desc->sset = true;
+ desc->rset = 2;
+}
+EXPORT_SYMBOL_GPL(poly1305_init_generic);
+
+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes)
+{
+ unsigned int bytes;
+
+ if (unlikely(desc->buflen)) {
+ bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen);
+ memcpy(desc->buf + desc->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ desc->buflen += bytes;
+
+ if (desc->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf,
+ 1, 1);
+ desc->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ poly1305_core_blocks(&desc->h, &desc->core_r, src,
+ nbytes / POLY1305_BLOCK_SIZE, 1);
+ src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ desc->buflen = nbytes;
+ memcpy(desc->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL_GPL(poly1305_update_generic);
+
+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
+{
+ if (unlikely(desc->buflen)) {
+ desc->buf[desc->buflen++] = 1;
+ memset(desc->buf + desc->buflen, 0,
+ POLY1305_BLOCK_SIZE - desc->buflen);
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0);
+ }
+
+ poly1305_core_emit(&desc->h, desc->s, dst);
+ *desc = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL_GPL(poly1305_final_generic);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
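
A usage sketch (illustration only, not part of the patch) of the generic
entry points this file provides. POLY1305_KEY_SIZE and the 16-byte digest
size are assumed to come from <crypto/internal/poly1305.h>; most callers go
through the poly1305_init()/update()/final() wrappers, which may select an
arch-optimised implementation instead.

static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int len,
				 u8 mac[16])
{
	struct poly1305_desc_ctx desc;

	poly1305_init_generic(&desc, key);
	poly1305_update_generic(&desc, msg, len);
	poly1305_final_generic(&desc, mac);	/* emits the MAC and wipes the context */
}
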
diff --git a/lib/sha1.c b/lib/crypto/sha1.c
index 1d96d2c02b82..1aebe7be9401 100644
--- a/lib/sha1.c
+++ b/lib/crypto/sha1.c
@@ -8,8 +8,10 @@
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/bitops.h>
-#include <linux/cryptohash.h>
+#include <linux/string.h>
+#include <crypto/sha1.h>
#include <asm/unaligned.h>
/*
@@ -55,7 +57,8 @@
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
- B = ror32(B, 2); } while (0)
+ B = ror32(B, 2); \
+ TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
@@ -64,24 +67,27 @@
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
- * sha_transform - single block SHA1 transform
+ * sha1_transform - single block SHA1 transform (deprecated)
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
- * This function generates a SHA1 digest for a single 512-bit block.
- * Be warned, it does not handle padding and message digest, do not
- * confuse it with the full FIPS 180-1 digest algorithm for variable
- * length messages.
+ * This function executes SHA-1's internal compression function. It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function. SHA-1 is no longer considered secure. And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
-void sha_transform(__u32 *digest, const char *data, __u32 *array)
+void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
+ unsigned int i = 0;
A = digest[0];
B = digest[1];
@@ -90,94 +96,24 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array)
E = digest[4];
/* Round 1 - iterations 0-16 take their input from 'data' */
- T_0_15( 0, A, B, C, D, E);
- T_0_15( 1, E, A, B, C, D);
- T_0_15( 2, D, E, A, B, C);
- T_0_15( 3, C, D, E, A, B);
- T_0_15( 4, B, C, D, E, A);
- T_0_15( 5, A, B, C, D, E);
- T_0_15( 6, E, A, B, C, D);
- T_0_15( 7, D, E, A, B, C);
- T_0_15( 8, C, D, E, A, B);
- T_0_15( 9, B, C, D, E, A);
- T_0_15(10, A, B, C, D, E);
- T_0_15(11, E, A, B, C, D);
- T_0_15(12, D, E, A, B, C);
- T_0_15(13, C, D, E, A, B);
- T_0_15(14, B, C, D, E, A);
- T_0_15(15, A, B, C, D, E);
+ for (; i < 16; ++i)
+ T_0_15(i, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
- T_16_19(16, E, A, B, C, D);
- T_16_19(17, D, E, A, B, C);
- T_16_19(18, C, D, E, A, B);
- T_16_19(19, B, C, D, E, A);
+ for (; i < 20; ++i)
+ T_16_19(i, A, B, C, D, E);
/* Round 2 */
- T_20_39(20, A, B, C, D, E);
- T_20_39(21, E, A, B, C, D);
- T_20_39(22, D, E, A, B, C);
- T_20_39(23, C, D, E, A, B);
- T_20_39(24, B, C, D, E, A);
- T_20_39(25, A, B, C, D, E);
- T_20_39(26, E, A, B, C, D);
- T_20_39(27, D, E, A, B, C);
- T_20_39(28, C, D, E, A, B);
- T_20_39(29, B, C, D, E, A);
- T_20_39(30, A, B, C, D, E);
- T_20_39(31, E, A, B, C, D);
- T_20_39(32, D, E, A, B, C);
- T_20_39(33, C, D, E, A, B);
- T_20_39(34, B, C, D, E, A);
- T_20_39(35, A, B, C, D, E);
- T_20_39(36, E, A, B, C, D);
- T_20_39(37, D, E, A, B, C);
- T_20_39(38, C, D, E, A, B);
- T_20_39(39, B, C, D, E, A);
+ for (; i < 40; ++i)
+ T_20_39(i, A, B, C, D, E);
/* Round 3 */
- T_40_59(40, A, B, C, D, E);
- T_40_59(41, E, A, B, C, D);
- T_40_59(42, D, E, A, B, C);
- T_40_59(43, C, D, E, A, B);
- T_40_59(44, B, C, D, E, A);
- T_40_59(45, A, B, C, D, E);
- T_40_59(46, E, A, B, C, D);
- T_40_59(47, D, E, A, B, C);
- T_40_59(48, C, D, E, A, B);
- T_40_59(49, B, C, D, E, A);
- T_40_59(50, A, B, C, D, E);
- T_40_59(51, E, A, B, C, D);
- T_40_59(52, D, E, A, B, C);
- T_40_59(53, C, D, E, A, B);
- T_40_59(54, B, C, D, E, A);
- T_40_59(55, A, B, C, D, E);
- T_40_59(56, E, A, B, C, D);
- T_40_59(57, D, E, A, B, C);
- T_40_59(58, C, D, E, A, B);
- T_40_59(59, B, C, D, E, A);
+ for (; i < 60; ++i)
+ T_40_59(i, A, B, C, D, E);
/* Round 4 */
- T_60_79(60, A, B, C, D, E);
- T_60_79(61, E, A, B, C, D);
- T_60_79(62, D, E, A, B, C);
- T_60_79(63, C, D, E, A, B);
- T_60_79(64, B, C, D, E, A);
- T_60_79(65, A, B, C, D, E);
- T_60_79(66, E, A, B, C, D);
- T_60_79(67, D, E, A, B, C);
- T_60_79(68, C, D, E, A, B);
- T_60_79(69, B, C, D, E, A);
- T_60_79(70, A, B, C, D, E);
- T_60_79(71, E, A, B, C, D);
- T_60_79(72, D, E, A, B, C);
- T_60_79(73, C, D, E, A, B);
- T_60_79(74, B, C, D, E, A);
- T_60_79(75, A, B, C, D, E);
- T_60_79(76, E, A, B, C, D);
- T_60_79(77, D, E, A, B, C);
- T_60_79(78, C, D, E, A, B);
- T_60_79(79, B, C, D, E, A);
+ for (; i < 80; ++i)
+ T_60_79(i, A, B, C, D, E);
digest[0] += A;
digest[1] += B;
@@ -185,13 +121,13 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array)
digest[3] += D;
digest[4] += E;
}
-EXPORT_SYMBOL(sha_transform);
+EXPORT_SYMBOL(sha1_transform);
/**
- * sha_init - initialize the vectors for a SHA1 digest
+ * sha1_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
-void sha_init(__u32 *buf)
+void sha1_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
@@ -199,4 +135,6 @@ void sha_init(__u32 *buf)
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
-EXPORT_SYMBOL(sha_init);
+EXPORT_SYMBOL(sha1_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
new file mode 100644
index 000000000000..3ac1ef8677db
--- /dev/null
+++ b/lib/crypto/sha256.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256, as specified in
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ *
+ * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2014 Red Hat Inc.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/sha256_base.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+static const u32 SHA256_K[] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+static inline u32 Ch(u32 x, u32 y, u32 z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline u32 Maj(u32 x, u32 y, u32 z)
+{
+ return (x & y) | (z & (x | y));
+}
+
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+ W[I] = get_unaligned_be32((__u32 *)input + I);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+ W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+}
+
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
+ u32 t1, t2; \
+ t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
+ t2 = e0(a) + Maj(a, b, c); \
+ d += t1; \
+ h = t1 + t2; \
+} while (0)
+
+static void sha256_transform(u32 *state, const u8 *input, u32 *W)
+{
+ u32 a, b, c, d, e, f, g, h;
+ int i;
+
+ /* load the input */
+ for (i = 0; i < 16; i += 8) {
+ LOAD_OP(i + 0, W, input);
+ LOAD_OP(i + 1, W, input);
+ LOAD_OP(i + 2, W, input);
+ LOAD_OP(i + 3, W, input);
+ LOAD_OP(i + 4, W, input);
+ LOAD_OP(i + 5, W, input);
+ LOAD_OP(i + 6, W, input);
+ LOAD_OP(i + 7, W, input);
+ }
+
+ /* now blend */
+ for (i = 16; i < 64; i += 8) {
+ BLEND_OP(i + 0, W);
+ BLEND_OP(i + 1, W);
+ BLEND_OP(i + 2, W);
+ BLEND_OP(i + 3, W);
+ BLEND_OP(i + 4, W);
+ BLEND_OP(i + 5, W);
+ BLEND_OP(i + 6, W);
+ BLEND_OP(i + 7, W);
+ }
+
+ /* load the state into our registers */
+ a = state[0]; b = state[1]; c = state[2]; d = state[3];
+ e = state[4]; f = state[5]; g = state[6]; h = state[7];
+
+ /* now iterate */
+ for (i = 0; i < 64; i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+ state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+}
+
+static void sha256_transform_blocks(struct sha256_state *sctx,
+ const u8 *input, int blocks)
+{
+ u32 W[64];
+
+ do {
+ sha256_transform(sctx->state, input, W);
+ input += SHA256_BLOCK_SIZE;
+ } while (--blocks);
+
+ memzero_explicit(W, sizeof(W));
+}
+
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+{
+ lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks);
+}
+EXPORT_SYMBOL(sha256_update);
+
+static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size)
+{
+ lib_sha256_base_do_finalize(sctx, sha256_transform_blocks);
+ lib_sha256_base_finish(sctx, out, digest_size);
+}
+
+void sha256_final(struct sha256_state *sctx, u8 *out)
+{
+ __sha256_final(sctx, out, 32);
+}
+EXPORT_SYMBOL(sha256_final);
+
+void sha224_final(struct sha256_state *sctx, u8 *out)
+{
+ __sha256_final(sctx, out, 28);
+}
+EXPORT_SYMBOL(sha224_final);
+
+void sha256(const u8 *data, unsigned int len, u8 *out)
+{
+ struct sha256_state sctx;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, data, len);
+ sha256_final(&sctx, out);
+}
+EXPORT_SYMBOL(sha256);
+
+MODULE_LICENSE("GPL");
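
A usage sketch (illustration only) of the interface exported above, showing
the one-shot helper next to the equivalent streaming sequence. The 32-byte
digest size and sha256_init() are assumed to come from the sha256 headers
this file includes.

static void example_sha256(const u8 *data, unsigned int len, u8 digest[32])
{
	struct sha256_state sctx;

	/* one-shot */
	sha256(data, len, digest);

	/* streaming equivalent */
	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);
}
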
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
new file mode 100644
index 000000000000..c852c7151b0a
--- /dev/null
+++ b/lib/crypto/utils.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto library utility functions
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/utils.h>
+#include <linux/module.h>
+
+/*
+ * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
+ * (which may alias one of the sources). Don't call this directly; call
+ * crypto_xor() or crypto_xor_cpy() instead.
+ */
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
+{
+ int relalign = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ int size = sizeof(unsigned long);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
+
+ relalign = d ? 1 << __ffs(d) : size;
+
+ /*
+ * If we care about alignment, process as many bytes as
+ * needed to advance dst and src to values whose alignments
+ * equal their relative alignment. This will allow us to
+ * process the remainder of the input using optimal strides.
+ */
+ while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+ *dst++ = *src1++ ^ *src2++;
+ len--;
+ }
+ }
+
+ while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u64 l = get_unaligned((u64 *)src1) ^
+ get_unaligned((u64 *)src2);
+ put_unaligned(l, (u64 *)dst);
+ } else {
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ }
+ dst += 8;
+ src1 += 8;
+ src2 += 8;
+ len -= 8;
+ }
+
+ while (len >= 4 && !(relalign & 3)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u32 l = get_unaligned((u32 *)src1) ^
+ get_unaligned((u32 *)src2);
+ put_unaligned(l, (u32 *)dst);
+ } else {
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ }
+ dst += 4;
+ src1 += 4;
+ src2 += 4;
+ len -= 4;
+ }
+
+ while (len >= 2 && !(relalign & 1)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u16 l = get_unaligned((u16 *)src1) ^
+ get_unaligned((u16 *)src2);
+ put_unaligned(l, (u16 *)dst);
+ } else {
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ }
+ dst += 2;
+ src1 += 2;
+ src2 += 2;
+ len -= 2;
+ }
+
+ while (len--)
+ *dst++ = *src1++ ^ *src2++;
+}
+EXPORT_SYMBOL_GPL(__crypto_xor);
+
+MODULE_LICENSE("GPL");
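
As the comment above notes, __crypto_xor() is not called directly; here is a
minimal sketch (illustration only) of the intended call path through the
inline wrappers, assumed to be declared in <crypto/utils.h> (or
<crypto/algapi.h> in older trees):

static void example_apply_keystream(u8 *dst, const u8 *src,
				    const u8 *keystream, unsigned int len)
{
	/* dst = src ^ keystream, e.g. applying a stream-cipher keystream */
	crypto_xor_cpy(dst, src, keystream, len);
}
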
diff --git a/lib/debug_info.c b/lib/debug_info.c
index 36daf753293c..cc4723c74af5 100644
--- a/lib/debug_info.c
+++ b/lib/debug_info.c
@@ -5,8 +5,6 @@
* CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
* adding appropriate #includes is fine.
*/
-#include <stdarg.h>
-
#include <linux/cred.h>
#include <linux/crypto.h>
#include <linux/dcache.h>
@@ -22,6 +20,7 @@
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <net/sock.h>
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..fb12a9bacd2f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic infrastructure for lifetime debugging of objects.
*
- * Started by Thomas Gleixner
- *
* Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
- *
- * For licencing details see kernel-base/COPYING
*/
#define pr_fmt(fmt) "ODEBUG: " fmt
@@ -19,22 +16,44 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
+#include <linux/cpu.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
#define ODEBUG_POOL_SIZE 1024
#define ODEBUG_POOL_MIN_LEVEL 256
+#define ODEBUG_POOL_PERCPU_SIZE 64
+#define ODEBUG_BATCH_SIZE 16
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX 1024
+#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
+
struct debug_bucket {
struct hlist_head list;
raw_spinlock_t lock;
};
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+ struct hlist_head free_objs;
+ int obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
@@ -44,13 +63,20 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
+static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int __maybe_unused debug_objects_maxchecked __read_mostly;
@@ -62,7 +88,8 @@ static int debug_objects_pool_size __read_mostly
= ODEBUG_POOL_SIZE;
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
-static struct debug_obj_descr *descr_test __read_mostly;
+static const struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache *obj_cache __ro_after_init;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
@@ -71,7 +98,7 @@ static int debug_objects_allocated;
static int debug_objects_freed;
static void free_obj_work(struct work_struct *work);
-static DECLARE_WORK(debug_obj_work, free_obj_work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
@@ -99,29 +126,33 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
static void fill_pool(void)
{
- gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
- struct debug_obj *new, *obj;
+ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ struct debug_obj *obj;
unsigned long flags;
- if (likely(obj_pool_free >= debug_objects_pool_min_level))
+ if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
return;
/*
* Reuse objs from the global free list; they will be reinitialized
* when allocating.
+ *
+ * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+ * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+ * sections.
*/
- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
raw_spin_lock_irqsave(&pool_lock, flags);
/*
* Recheck with the lock held as the worker thread might have
* won the race and freed the global free list already.
*/
- if (obj_nr_tofree) {
+ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
- obj_nr_tofree--;
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
@@ -129,16 +160,24 @@ static void fill_pool(void)
if (unlikely(!obj_cache))
return;
- while (obj_pool_free < debug_objects_pool_min_level) {
+ while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
+ struct debug_obj *new[ODEBUG_BATCH_SIZE];
+ int cnt;
- new = kmem_cache_zalloc(obj_cache, gfp);
- if (!new)
+ for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+ new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+ if (!new[cnt])
+ break;
+ }
+ if (!cnt)
return;
raw_spin_lock_irqsave(&pool_lock, flags);
- hlist_add_head(&new->node, &obj_pool);
- debug_objects_allocated++;
- obj_pool_free++;
+ while (cnt) {
+ hlist_add_head(&new[--cnt]->node, &obj_pool);
+ debug_objects_allocated++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ }
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
}
@@ -163,36 +202,77 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
+ * Allocate a new object from the hlist
*/
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+static struct debug_obj *__alloc_object(struct hlist_head *list)
{
struct debug_obj *obj = NULL;
- raw_spin_lock(&pool_lock);
- if (obj_pool.first) {
- obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-
- obj->object = addr;
- obj->descr = descr;
- obj->state = ODEBUG_STATE_NONE;
- obj->astate = 0;
+ if (list->first) {
+ obj = hlist_entry(list->first, typeof(*obj), node);
hlist_del(&obj->node);
+ }
- hlist_add_head(&obj->node, &b->list);
+ return obj;
+}
+
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+{
+ struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ struct debug_obj *obj;
+ if (likely(obj_cache)) {
+ obj = __alloc_object(&percpu_pool->free_objs);
+ if (obj) {
+ percpu_pool->obj_free--;
+ goto init_obj;
+ }
+ }
+
+ raw_spin_lock(&pool_lock);
+ obj = __alloc_object(&obj_pool);
+ if (obj) {
obj_pool_used++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+
+ /*
+ * Looking ahead, allocate one batch of debug objects and
+ * put them into the percpu free pool.
+ */
+ if (likely(obj_cache)) {
+ int i;
+
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ struct debug_obj *obj2;
+
+ obj2 = __alloc_object(&obj_pool);
+ if (!obj2)
+ break;
+ hlist_add_head(&obj2->node,
+ &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ obj_pool_used++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ }
+ }
+
if (obj_pool_used > obj_pool_max_used)
obj_pool_max_used = obj_pool_used;
- obj_pool_free--;
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
raw_spin_unlock(&pool_lock);
+init_obj:
+ if (obj) {
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ obj->astate = 0;
+ hlist_add_head(&obj->node, &b->list);
+ }
return obj;
}
@@ -209,22 +289,31 @@ static void free_obj_work(struct work_struct *work)
unsigned long flags;
HLIST_HEAD(tofree);
+ WRITE_ONCE(obj_freeing, false);
if (!raw_spin_trylock_irqsave(&pool_lock, flags))
return;
+ if (obj_pool_free >= debug_objects_pool_size)
+ goto free_objs;
+
/*
* The objs on the pool list might be allocated before the work is
 * run, so recheck if the pool list is full or not; if not, fill the pool
- * list from the global free list
+ * list from the global free list. As it is likely that a workload
+ * may be gearing up to use more and more objects, don't free any
+ * of them until the next round.
*/
while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
- obj_nr_tofree--;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
}
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ return;
+free_objs:
/*
* Pool list is already full and there are still objs on the free
* list. Move remaining free objs to a temporary list to free the
@@ -233,7 +322,7 @@ static void free_obj_work(struct work_struct *work)
if (obj_nr_tofree) {
hlist_move_list(&obj_to_free, &tofree);
debug_objects_freed += obj_nr_tofree;
- obj_nr_tofree = 0;
+ WRITE_ONCE(obj_nr_tofree, 0);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
@@ -243,24 +332,86 @@ static void free_obj_work(struct work_struct *work)
}
}
-static bool __free_object(struct debug_obj *obj)
+static void __free_object(struct debug_obj *obj)
{
+ struct debug_obj *objs[ODEBUG_BATCH_SIZE];
+ struct debug_percpu_free *percpu_pool;
+ int lookahead_count = 0;
unsigned long flags;
bool work;
- raw_spin_lock_irqsave(&pool_lock, flags);
- work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
+ local_irq_save(flags);
+ if (!obj_cache)
+ goto free_to_obj_pool;
+
+ /*
+ * Try to free it into the percpu pool first.
+ */
+ percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+ hlist_add_head(&obj->node, &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ local_irq_restore(flags);
+ return;
+ }
+
+ /*
+ * As the percpu pool is full, look ahead and pull out a batch
+ * of objects from the percpu pool and free them as well.
+ */
+ for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
+ objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
+ if (!objs[lookahead_count])
+ break;
+ percpu_pool->obj_free--;
+ }
+
+free_to_obj_pool:
+ raw_spin_lock(&pool_lock);
+ work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
obj_pool_used--;
if (work) {
- obj_nr_tofree++;
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
hlist_add_head(&obj->node, &obj_to_free);
+ if (lookahead_count) {
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_to_free);
+ }
+ }
+
+ if ((obj_pool_free > debug_objects_pool_size) &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+ int i;
+
+ /*
+ * Free one more batch of objects from obj_pool.
+ */
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ obj = __alloc_object(&obj_pool);
+ hlist_add_head(&obj->node, &obj_to_free);
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
+ }
+ }
} else {
- obj_pool_free++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
hlist_add_head(&obj->node, &obj_pool);
+ if (lookahead_count) {
+ WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_pool);
+ }
+ }
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- return work;
+ raw_spin_unlock(&pool_lock);
+ local_irq_restore(flags);
}
/*
@@ -269,9 +420,38 @@ static bool __free_object(struct debug_obj *obj)
*/
static void free_object(struct debug_obj *obj)
{
- if (__free_object(obj))
- schedule_work(&debug_obj_work);
+ __free_object(obj);
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int object_cpu_offline(unsigned int cpu)
+{
+ struct debug_percpu_free *percpu_pool;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ /* Remote access is safe as the CPU is dead already */
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+ hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ obj_pool_used -= percpu_pool->obj_free;
+ debug_objects_freed += percpu_pool->obj_free;
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+ percpu_pool->obj_free = 0;
+
+ return 0;
}
+#endif
/*
* We run out of memory. That means we probably have tons of objects
@@ -315,17 +495,26 @@ static struct debug_bucket *get_bucket(unsigned long addr)
static void debug_print_object(struct debug_obj *obj, char *msg)
{
- struct debug_obj_descr *descr = obj->descr;
+ const struct debug_obj_descr *descr = obj->descr;
static int limit;
+ /*
+ * Don't report if lookup_object_or_alloc() by the current thread
+ * failed because lookup_object_or_alloc()/debug_objects_oom() by a
+ * concurrent thread turned off debug_objects_enabled and cleared
+ * the hash buckets.
+ */
+ if (!debug_objects_enabled)
+ return;
+
if (limit < 5 && descr != descr_test) {
void *hint = descr->debug_hint ?
descr->debug_hint(obj->object) : NULL;
limit++;
WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
- "object type: %s hint: %pS\n",
+ "object: %p object type: %s hint: %pS\n",
msg, obj_states[obj->state], obj->astate,
- descr->name, hint);
+ obj->object, descr->name, hint);
}
debug_objects_warnings++;
}
@@ -368,30 +557,84 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context -- for !RT kernels we rely on the fact that spinlock_t and
+ * raw_spinlock_t are basically the same type and this lock-type
+ * inversion works just fine.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ /*
+ * Annotate away the spinlock_t inside raw_spinlock_t warning
+ * by temporarily raising the wait-type to WAIT_SLEEP, matching
+ * the preemptible() condition above.
+ */
+ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+ lock_map_acquire_try(&fill_pool_map);
+ fill_pool();
+ lock_map_release(&fill_pool_map);
+ }
+}
+
static void
-__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ debug_objects_fill_pool();
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- debug_object_is_on_stack(addr, onstack);
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}
switch (obj->state) {
@@ -399,23 +642,18 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_INIT;
- break;
-
- case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "init");
- state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_object_fixup(descr->fixup_init, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "init");
- break;
default:
break;
}
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "init");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_init, addr, o.state);
}
/**
@@ -423,7 +661,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -438,7 +676,7 @@ EXPORT_SYMBOL_GPL(debug_object_init);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
if (!debug_objects_enabled)
return;
@@ -453,71 +691,55 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
* @descr: pointer to an object specific debug description structure
* Returns 0 for success, -EINVAL for check failed.
*/
-int debug_object_activate(void *addr, struct debug_obj_descr *descr)
+int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
if (!debug_objects_enabled)
return 0;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return 0;
+ } else if (likely(!IS_ERR(obj))) {
switch (obj->state) {
- case ODEBUG_STATE_INIT:
- case ODEBUG_STATE_INACTIVE:
- obj->state = ODEBUG_STATE_ACTIVE;
- ret = 0;
- break;
-
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "activate");
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- ret = debug_object_fixup(descr->fixup_activate, addr, state);
- return ret ? 0 : -EINVAL;
-
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "activate");
- ret = -EINVAL;
+ o = *obj;
break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ fallthrough;
default:
- ret = 0;
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return 0;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
- return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+ debug_print_object(&o, "activate");
+
+ switch (o.state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+ return 0;
+ fallthrough;
+ default:
+ return -EINVAL;
}
- return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -526,8 +748,9 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -542,30 +765,24 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
switch (obj->state) {
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- if (!obj->astate)
- obj->state = ODEBUG_STATE_INACTIVE;
- else
- debug_print_object(obj, "deactivate");
- break;
-
- case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "deactivate");
- break;
+ if (obj->astate)
+ break;
+ obj->state = ODEBUG_STATE_INACTIVE;
+ fallthrough;
default:
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
}
- } else {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "deactivate");
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -574,11 +791,10 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -589,30 +805,31 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_NONE:
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_DESTROYED;
- break;
- case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "destroy");
- state = obj->state;
+ fallthrough;
+ default:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_object_fixup(descr->fixup_destroy, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "destroy");
- break;
- default:
- break;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "destroy");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -621,11 +838,10 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_free(void *addr, struct debug_obj_descr *descr)
+void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -636,24 +852,26 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_object_fixup(descr->fixup_free, addr, state);
- return;
+ break;
default:
hlist_del(&obj->node);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "free");
+
+ debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
@@ -662,8 +880,9 @@ EXPORT_SYMBOL_GPL(debug_object_free);
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
*/
-void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -671,34 +890,25 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
@@ -710,9 +920,10 @@ EXPORT_SYMBOL_GPL(debug_object_assert_init);
* @next: state to move to if expected state is found
*/
void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -728,25 +939,19 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
if (obj) {
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- if (obj->astate == expect)
- obj->astate = next;
- else
- debug_print_object(obj, "active_state");
- break;
-
+ if (obj->astate != expect)
+ break;
+ obj->astate = next;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
default:
- debug_print_object(obj, "active_state");
break;
}
- } else {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "active_state");
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -754,13 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct debug_obj_descr *descr;
- enum debug_obj_state state;
+ int cnt, objs_checked = 0;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
struct hlist_node *tmp;
- struct debug_obj *obj;
- int cnt, objs_checked = 0;
- bool work = false;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -782,16 +984,14 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
- descr = obj->descr;
- state = obj->state;
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_object_fixup(descr->fixup_free,
- (void *) oaddr, state);
+ debug_print_object(&o, "free");
+ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
goto repeat;
default:
hlist_del(&obj->node);
- work |= __free_object(obj);
+ __free_object(obj);
break;
}
}
@@ -807,8 +1007,10 @@ repeat:
debug_objects_maxchecked = objs_checked;
/* Schedule work to actually kmem_cache_free() objects */
- if (work)
- schedule_work(&debug_obj_work);
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
}
void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -822,54 +1024,39 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
+ int cpu, obj_percpu_free = 0;
+
+ for_each_possible_cpu(cpu)
+ obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
- seq_printf(m, "pool_free :%d\n", obj_pool_free);
+ seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
+ seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
- seq_printf(m, "pool_used :%d\n", obj_pool_used);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
- seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
+ seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
}
-
-static int debug_stats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, debug_stats_show, NULL);
-}
-
-static const struct file_operations debug_stats_fops = {
- .open = debug_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debug_stats);
static int __init debug_objects_init_debugfs(void)
{
- struct dentry *dbgdir, *dbgstats;
+ struct dentry *dbgdir;
if (!debug_objects_enabled)
return 0;
dbgdir = debugfs_create_dir("debug_objects", NULL);
- if (!dbgdir)
- return -ENOMEM;
- dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
- &debug_stats_fops);
- if (!dbgstats)
- goto err;
+ debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
return 0;
-
-err:
- debugfs_remove(dbgdir);
-
- return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
@@ -886,7 +1073,7 @@ struct self_test {
unsigned long dummy2[3];
};
-static __initdata struct debug_obj_descr descr_type_test;
+static __initconst const struct debug_obj_descr descr_type_test;
static bool __init is_static_object(void *addr)
{
@@ -1011,7 +1198,7 @@ out:
return res;
}
-static __initdata struct debug_obj_descr descr_type_test = {
+static __initconst const struct debug_obj_descr descr_type_test = {
.name = "selftest",
.is_static_object = is_static_object,
.fixup_init = fixup_init,
@@ -1130,6 +1317,8 @@ static int __init debug_objects_replace_static_objects(void)
hlist_add_head(&obj->node, &objects);
}
+ debug_objects_allocated += i;
+
/*
* debug_objects_mem_init() is now called early that only one CPU is up
* and interrupts have been disabled, so it is safe to replace the
@@ -1175,9 +1364,20 @@ free:
*/
void __init debug_objects_mem_init(void)
{
+ int cpu, extras;
+
if (!debug_objects_enabled)
return;
+ /*
+ * Initialize the percpu object pools
+ *
+ * Initialization is not strictly necessary, but was done for
+ * completeness.
+ */
+ for_each_possible_cpu(cpu)
+ INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1187,13 +1387,20 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
+ return;
} else
debug_objects_selftest();
+#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
+ object_cpu_offline);
+#endif
+
/*
* Increase the thresholds for allocating and freeing objects
* according to the number of possible CPUs available in the system.
*/
- debug_objects_pool_size += num_possible_cpus() * 32;
- debug_objects_pool_min_level += num_possible_cpus() * 4;
+ extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
+ debug_objects_pool_size += extras;
+ debug_objects_pool_min_level += extras;
}
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 9555b68bb774..1dcca8f2e194 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock(lock);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
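
A typical caller of the new raw-lock variants is a release path for a reference-counted object that sits on a list protected by a raw_spinlock_t. The sketch below is hypothetical (struct item, item_lock, put_item() are made up for illustration); it only shows the calling convention defined above: the function returns 1 with the lock held and interrupts disabled exactly when the count dropped to zero.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {				/* hypothetical object */
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_RAW_SPINLOCK(item_lock);	/* protects the item list */

static void put_item(struct item *it)
{
	unsigned long flags;

	/* Returns 1 with item_lock held and IRQs off only on the final put. */
	if (!_atomic_dec_and_raw_lock_irqsave(&it->refcnt, &item_lock, &flags))
		return;

	list_del(&it->node);
	raw_spin_unlock_irqrestore(&item_lock, flags);
	kfree(it);
}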
diff --git a/lib/decompress.c b/lib/decompress.c
index 857ab1af1ef3..ab3fc90ffc64 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -13,6 +13,7 @@
#include <linux/decompress/inflate.h>
#include <linux/decompress/unlzo.h>
#include <linux/decompress/unlz4.h>
+#include <linux/decompress/unzstd.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -37,6 +38,9 @@
#ifndef CONFIG_DECOMPRESS_LZ4
# define unlz4 NULL
#endif
+#ifndef CONFIG_DECOMPRESS_ZSTD
+# define unzstd NULL
+#endif
struct compress_format {
unsigned char magic[2];
@@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = {
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },
+ { {0x28, 0xb5}, "zstd", unzstd },
{ {0, 0}, NULL, NULL }
};
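
For context on the new table entry: the zstd frame magic 0xFD2FB528 is stored little-endian, so a compressed stream begins with the bytes 28 B5 2F FD, and matching the first two is enough to pick the handler. A minimal stand-alone check of the same kind (the function name is illustrative):

/* The zstd frame magic 0xFD2FB528 appears on disk little-endian as
 * 28 B5 2F FD; two bytes suffice to distinguish it from the other
 * supported formats in the table above.
 */
static int is_zstd_stream(const unsigned char *buf, long len)
{
	return len >= 2 && buf[0] == 0x28 && buf[1] == 0xb5;
}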
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 7c4932eed748..3518e7394eca 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -34,7 +34,7 @@
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
- http://www.hospiceacadiana.com/
+ https://www.hospiceacadiana.com/
Manuel
*/
@@ -80,7 +80,7 @@
/* This is what we know about each Huffman coding group */
struct group_data {
- /* We have an extra slot at the end of limit[] for a sentinal value. */
+ /* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
@@ -337,7 +337,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
- limit[maxLen+1] = INT_MAX; /* Sentinal value for
+ limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
@@ -385,12 +385,12 @@ static int INIT get_next_block(struct bunzip_data *bd)
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
- };
+ }
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
- /* Figure how how many bits are in next symbol and
+ /* Figure how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 63b4b7eee138..e19199f4a684 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -10,6 +10,10 @@
#include "zlib_inflate/inftrees.c"
#include "zlib_inflate/inffast.c"
#include "zlib_inflate/inflate.c"
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "zlib_dfltcc/dfltcc.c"
+#include "zlib_dfltcc/dfltcc_inflate.c"
+#endif
#else /* STATIC */
/* initramfs et al: linked */
@@ -35,7 +39,7 @@ static long INIT nofill(void *buffer, unsigned long len)
}
/* Included from initramfs et al code */
-STATIC int INIT __gunzip(unsigned char *buf, long len,
+static int INIT __gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
@@ -76,7 +80,12 @@ STATIC int INIT __gunzip(unsigned char *buf, long len,
}
strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* Always allocate the full workspace for DFLTCC */
+ zlib_inflate_workspacesize());
+#else
sizeof(struct inflate_state));
+#endif
if (strm->workspace == NULL) {
error("Out of memory while allocating workspace");
goto gunzip_nomem4;
@@ -123,10 +132,14 @@ STATIC int INIT __gunzip(unsigned char *buf, long len,
rc = zlib_inflateInit2(strm, -MAX_WBITS);
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* Always keep the window for DFLTCC */
+#else
if (!flush) {
WS(strm)->inflate_state.wsize = 0;
WS(strm)->inflate_state.window = NULL;
}
+#endif
while (rc == Z_OK) {
if (strm->avail_in == 0) {
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index c0cfcfd486be..e6327391b6b6 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -112,6 +112,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("data corrupted");
goto exit_2;
}
+ } else if (size < 4) {
+ /* empty or end-of-file */
+ goto exit_3;
}
chunksize = get_unaligned_le32(inp);
@@ -125,6 +128,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
continue;
}
+ if (!fill && chunksize == 0) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
if (posp)
*posp += 4;
@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
}
}
+exit_3:
ret = 0;
exit_2:
if (!input)
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ed7a1fd819f2..20a858031f12 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -8,7 +8,7 @@
*implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (C) 1999-2005 Igor Pavlov
*
*Copyrights of the parts, see headers below.
@@ -56,7 +56,7 @@ static long long INIT read_int(unsigned char *ptr, int size)
/* Small range coder implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (c) 1999-2005 Igor Pavlov
*/
@@ -213,7 +213,7 @@ rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
* Small lzma deflate implementation.
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
* Copyright (C) 1999-2005 Igor Pavlov
*/
@@ -391,7 +391,7 @@ static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
struct cstate *cst, uint16_t *p,
int pos_state, uint16_t *prob) {
- int offset;
+ int offset;
uint16_t *prob_len;
int num_bits;
int len;
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 1f439a622076..64c1358500ce 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -43,7 +43,6 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
int l;
u8 *parse = input;
u8 *end = input + in_len;
- u8 level = 0;
u16 version;
/*
@@ -65,7 +64,7 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
- level = *parse++;
+ parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 25d59a95bd66..842894158944 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -20,10 +20,10 @@
*
* The worst case for in-place decompression is that the beginning of
* the file is compressed extremely well, and the rest of the file is
- * uncompressible. Thus, we must look for worst-case expansion when the
- * compressor is encoding uncompressible data.
+ * incompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding incompressible data.
*
- * The structure of the .xz file in case of a compresed kernel is as follows.
+ * The structure of the .xz file in case of a compressed kernel is as follows.
* Sizes (as bytes) of the fields are in parenthesis.
*
* Stream Header (12)
@@ -58,7 +58,7 @@
* uncompressed size of the payload is in practice never less than the
* payload size itself. The LZMA2 format would allow uncompressed size
* to be less than the payload size, but no sane compressor creates such
- * files. LZMA2 supports storing uncompressible data in uncompressed form,
+ * files. LZMA2 supports storing incompressible data in uncompressed form,
* so there's never a need to create payloads whose uncompressed size is
* smaller than the compressed size.
*
@@ -102,6 +102,8 @@
*/
#ifdef STATIC
# define XZ_PREBOOT
+#else
+#include <linux/decompress/unxz.h>
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
@@ -131,9 +133,6 @@
#ifdef CONFIG_ARM
# define XZ_DEC_ARM
#endif
-#ifdef CONFIG_IA64
-# define XZ_DEC_IA64
-#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
#endif
@@ -167,8 +166,8 @@
* memeq and memzero are not used much and any remotely sane implementation
* is fast enough. memcpy/memmove speed matters in multi-call mode, but
* the kernel image is decompressed in single-call mode, in which only
- * memcpy speed can matter and only if there is a lot of uncompressible data
- * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
+ * memmove speed can matter and only if there is a lot of incompressible data
+ * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the
* functions below should just be kept small; it's probably not worth
* optimizing for speed.
*/
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
new file mode 100644
index 000000000000..bba2c0bb10cb
--- /dev/null
+++ b/lib/decompress_unzstd.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for ZSTD with a 128 KB block size is calculated below.
+ * Note that the margin with ZSTD is bigger than with GZIP or XZ!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * uncompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding uncompressible data.
+ *
+ * The structure of the .zst file in case of a compressed kernel is as follows.
+ * Maximum sizes (as bytes) of the fields are in parenthesis.
+ *
+ * Frame Header: (18)
+ * Blocks: (N)
+ * Checksum: (4)
+ *
+ * The frame header and checksum overhead is at most 22 bytes.
+ *
+ * ZSTD stores the data in blocks. Each block has a header whose size is
+ * 3 bytes. After the block header, there is up to 128 KB of payload.
+ * The maximum uncompressed size of the payload is 128 KB. The minimum
+ * uncompressed size of the payload is never less than the payload size
+ * (excluding the block header).
+ *
+ * The assumption, that the uncompressed size of the payload is never
+ * smaller than the payload itself, is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ * - 22 bytes for the .zst file format headers;
+ * - 3 bytes per every 128 KiB of uncompressed size (one block header per
+ * block); and
+ * - 128 KiB (biggest possible zstd block size) to make sure that the
+ * decompressor never overwrites anything from the block it is currently
+ * reading.
+ *
+ * We get the following formula:
+ *
+ * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072
+ * <= 22 + (uncompressed_size >> 15) + 131072
+ */
+
+/*
+ * Preboot environments #include "path/to/decompress_unzstd.c".
+ * All of the source files we depend on must be #included.
+ * zstd's only source dependency is xxhash, which has no source
+ * dependencies.
+ *
+ * When UNZSTD_PREBOOT is defined we declare __decompress(), which is
+ * used for kernel decompression, instead of unzstd().
+ *
+ * Define __DISABLE_EXPORTS in preboot environments to prevent symbols
+ * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro.
+ */
+#ifdef STATIC
+# define UNZSTD_PREBOOT
+# include "xxhash.c"
+# include "zstd/decompress_sources.h"
+#else
+#include <linux/decompress/unzstd.h>
+#endif
+
+#include <linux/decompress/mm.h>
+#include <linux/kernel.h>
+#include <linux/zstd.h>
+
+/* 128MB is the maximum window size supported by zstd. */
+#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX)
+/*
+ * Size of the input and output buffers in multi-call mode.
+ * Pick a larger size because it isn't used during kernel decompression,
+ * since that is single pass, and we have to allocate a large buffer for
+ * zstd's window anyway. The larger size speeds up initramfs decompression.
+ */
+#define ZSTD_IOBUF_SIZE (1 << 17)
+
+static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
+{
+ const zstd_error_code err = zstd_get_error_code(ret);
+
+ if (!zstd_is_error(ret))
+ return 0;
+
+ /*
+ * zstd_get_error_name() cannot be used because error takes a char *
+ * not a const char *
+ */
+ switch (err) {
+ case ZSTD_error_memory_allocation:
+ error("ZSTD decompressor ran out of memory");
+ break;
+ case ZSTD_error_prefix_unknown:
+ error("Input is not in the ZSTD format (wrong magic bytes)");
+ break;
+ case ZSTD_error_dstSize_tooSmall:
+ case ZSTD_error_corruption_detected:
+ case ZSTD_error_checksum_wrong:
+ error("ZSTD-compressed data is corrupt");
+ break;
+ default:
+ error("ZSTD-compressed data is probably corrupt");
+ break;
+ }
+ return -1;
+}
+
+/*
+ * Handle the case where we have the entire input and output in one segment.
+ * We can allocate less memory (no circular buffer for the sliding window),
+ * and avoid some memcpy() calls.
+ */
+static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
+ long out_len, long *in_pos,
+ void (*error)(char *x))
+{
+ const size_t wksp_size = zstd_dctx_workspace_bound();
+ void *wksp = large_malloc(wksp_size);
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
+ int err;
+ size_t ret;
+
+ if (dctx == NULL) {
+ error("Out of memory while allocating zstd_dctx");
+ err = -1;
+ goto out;
+ }
+ /*
+ * Find out how large the frame actually is, there may be junk at
+ * the end of the frame that zstd_decompress_dctx() can't handle.
+ */
+ ret = zstd_find_frame_compressed_size(in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ in_len = (long)ret;
+
+ ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+
+ if (in_pos != NULL)
+ *in_pos = in_len;
+
+ err = 0;
+out:
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+static int INIT __unzstd(unsigned char *in_buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *in_pos,
+ void (*error)(char *x))
+{
+ zstd_in_buffer in;
+ zstd_out_buffer out;
+ zstd_frame_header header;
+ void *in_allocated = NULL;
+ void *out_allocated = NULL;
+ void *wksp = NULL;
+ size_t wksp_size;
+ zstd_dstream *dstream;
+ int err;
+ size_t ret;
+
+ /*
+ * ZSTD decompression code won't be happy if the buffer size is so big
+ * that its end address overflows. When the size is not provided, make
+ * it as big as possible without having the end address overflow.
+ */
+ if (out_len == 0)
+ out_len = UINTPTR_MAX - (uintptr_t)out_buf;
+
+ if (fill == NULL && flush == NULL)
+ /*
+ * We can decompress faster and with less memory when we have a
+ * single chunk.
+ */
+ return decompress_single(in_buf, in_len, out_buf, out_len,
+ in_pos, error);
+
+ /*
+ * If in_buf is not provided, we must be using fill(), so allocate
+ * a large enough buffer. If it is provided, it must be at least
+ * ZSTD_IOBUF_SIZE large.
+ */
+ if (in_buf == NULL) {
+ in_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (in_allocated == NULL) {
+ error("Out of memory while allocating input buffer");
+ err = -1;
+ goto out;
+ }
+ in_buf = in_allocated;
+ in_len = 0;
+ }
+ /* Read the first chunk, since we need to decode the frame header. */
+ if (fill != NULL)
+ in_len = fill(in_buf, ZSTD_IOBUF_SIZE);
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ /* Set the first non-empty input buffer. */
+ in.src = in_buf;
+ in.pos = 0;
+ in.size = in_len;
+ /* Allocate the output buffer if we are using flush(). */
+ if (flush != NULL) {
+ out_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (out_allocated == NULL) {
+ error("Out of memory while allocating output buffer");
+ err = -1;
+ goto out;
+ }
+ out_buf = out_allocated;
+ out_len = ZSTD_IOBUF_SIZE;
+ }
+ /* Set the output buffer. */
+ out.dst = out_buf;
+ out.pos = 0;
+ out.size = out_len;
+
+ /*
+ * We need to know the window size to allocate the zstd_dstream.
+ * Since we are streaming, we need to allocate a buffer for the sliding
+ * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
+ * (8 MB), so it is important to use the actual value so as not to
+ * waste memory when it is smaller.
+ */
+ ret = zstd_get_frame_header(&header, in.src, in.size);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ if (ret != 0) {
+ error("ZSTD-compressed data has an incomplete frame header");
+ err = -1;
+ goto out;
+ }
+ if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
+ error("ZSTD-compressed data has too large a window size");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Allocate the zstd_dstream now that we know how much memory is
+ * required.
+ */
+ wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+ wksp = large_malloc(wksp_size);
+ dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
+ if (dstream == NULL) {
+ error("Out of memory while allocating ZSTD_DStream");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Decompression loop:
+ * Read more data if necessary (error if no more data can be read).
+ * Call the decompression function, which returns 0 when finished.
+ * Flush any data produced if using flush().
+ */
+ if (in_pos != NULL)
+ *in_pos = 0;
+ do {
+ /*
+ * If we need to reload data, either we have fill() and can
+ * try to get more data, or we don't and the input is truncated.
+ */
+ if (in.pos == in.size) {
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+ in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1;
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ in.pos = 0;
+ in.size = in_len;
+ }
+ /* Returns zero when the frame is complete. */
+ ret = zstd_decompress_stream(dstream, &out, &in);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ /* Flush all of the data produced if using flush(). */
+ if (flush != NULL && out.pos > 0) {
+ if (out.pos != flush(out.dst, out.pos)) {
+ error("Failed to flush()");
+ err = -1;
+ goto out;
+ }
+ out.pos = 0;
+ }
+ } while (ret != 0);
+
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+
+ err = 0;
+out:
+ if (in_allocated != NULL)
+ large_free(in_allocated);
+ if (out_allocated != NULL)
+ large_free(out_allocated);
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+#ifndef UNZSTD_PREBOOT
+STATIC int INIT unzstd(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
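
The safety-margin comment at the top of the new file can be made concrete with a small worked example. This is a sketch, not kernel code: the boot wrappers use a shift-based upper bound rather than the exact division below.

#include <stdio.h>

/* Exact form of the margin formula documented in decompress_unzstd.c:
 * 22 bytes of frame overhead, 3 bytes of block header per 128 KiB of
 * uncompressed data, plus one full 128 KiB block.
 */
static unsigned long zstd_safety_margin(unsigned long uncompressed_size)
{
	return 22 + uncompressed_size * 3 / 131072 + 131072;
}

int main(void)
{
	printf("%lu\n", zstd_safety_margin(64UL << 20));	/* 132630 */
	return 0;
}

For a 64 MiB image this yields 131072 + 1536 + 22 = 132630 bytes, i.e. a little over 128 KiB of margin.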
diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c
new file mode 100644
index 000000000000..9c060c69f134
--- /dev/null
+++ b/lib/devmem_is_allowed.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A generic version of devmem_is_allowed.
+ *
+ * Based on arch/arm64/mm/mmap.c
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number. We mimic x86 here by
+ * disallowing access to system RAM as well as device-exclusive MMIO regions.
+ * This effectively disables read()/write() on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(PFN_PHYS(pfn)))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ return 0;
+}
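
To see how the predicate is meant to be consumed, a /dev/mem style reader would ask per page before copying, along the lines of the hypothetical helper below (loosely modelled on the checks in drivers/char/mem.c; the helper name and its caller are assumptions, and the declaration of devmem_is_allowed() is expected to come from the arch's io headers).

/* Hypothetical per-page gate, loosely modelled on the /dev/mem read
 * path: every page in the requested range must be allowed, otherwise
 * the whole access is refused.
 */
static int range_allowed(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;	/* RAM or exclusive MMIO: deny */
	return 1;
}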
diff --git a/lib/devres.c b/lib/devres.c
index 6a0e9bd6524a..c44f104b58d5 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -8,8 +8,9 @@
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
- DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
+ DEVM_IOREMAP_NP,
};
void devm_ioremap_release(struct device *dev, void *res)
@@ -28,7 +29,8 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
{
void __iomem **ptr, *addr = NULL;
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
if (!ptr)
return NULL;
@@ -36,12 +38,15 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP:
addr = ioremap(offset, size);
break;
- case DEVM_IOREMAP_NC:
- addr = ioremap_nocache(offset, size);
+ case DEVM_IOREMAP_UC:
+ addr = ioremap_uc(offset, size);
break;
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
+ case DEVM_IOREMAP_NP:
+ addr = ioremap_np(offset, size);
+ break;
}
if (addr) {
@@ -69,20 +74,19 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap);
/**
- * devm_ioremap_nocache - Managed ioremap_nocache()
+ * devm_ioremap_uc - Managed ioremap_uc()
* @dev: Generic device to remap IO address for
* @offset: Resource address to map
* @size: Size of map
*
- * Managed ioremap_nocache(). Map is automatically unmapped on driver
- * detach.
+ * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
*/
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
- resource_size_t size)
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
{
- return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
-EXPORT_SYMBOL(devm_ioremap_nocache);
+EXPORT_SYMBOL_GPL(devm_ioremap_uc);
/**
* devm_ioremap_wc - Managed ioremap_wc()
@@ -114,44 +118,42 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
-/**
- * devm_ioremap_resource() - check, request region, and ioremap resource
- * @dev: generic device to handle the resource for
- * @res: resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps it. All operations are managed and will be undone
- * on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_ioremap_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_ioremap_resource(struct device *dev,
- const struct resource *res)
+static void __iomem *
+__devm_ioremap_resource(struct device *dev, const struct resource *res,
+ enum devm_ioremap_type type)
{
resource_size_t size;
void __iomem *dest_ptr;
+ char *pretty_name;
BUG_ON(!dev);
if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource\n");
+ dev_err(dev, "invalid resource %pR\n", res);
return IOMEM_ERR_PTR(-EINVAL);
}
+ if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
+ type = DEVM_IOREMAP_NP;
+
size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
+ if (res->name)
+ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ dev_name(dev), res->name);
+ else
+ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!pretty_name) {
+ dev_err(dev, "can't generate pretty name for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
- dest_ptr = devm_ioremap(dev, res->start, size);
+ dest_ptr = __devm_ioremap(dev, res->start, size, type);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
@@ -160,8 +162,48 @@ void __iomem *devm_ioremap_resource(struct device *dev,
return dest_ptr;
}
+
+/**
+ * devm_ioremap_resource() - check, request region, and ioremap resource
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
+ *
+ * Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_ioremap_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
+}
EXPORT_SYMBOL(devm_ioremap_resource);
+/**
+ * devm_ioremap_resource_wc() - write-combined variant of
+ * devm_ioremap_resource()
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
+}
+
/*
* devm_of_iomap - Requests a resource and maps the memory mapped IO
* for a given device_node managed by a given device
@@ -177,12 +219,20 @@ EXPORT_SYMBOL(devm_ioremap_resource);
* @node: The device-tree node where the resource resides
* @index: index of the MMIO range in the "reg" property
* @size: Returns the size of the resource (pass NULL if not needed)
- * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
- * error code on failure. Usage example:
+ *
+ * Usage example:
*
* base = devm_of_iomap(&pdev->dev, node, 0, NULL);
* if (IS_ERR(base))
* return PTR_ERR(base);
+ *
+ * Please Note: This is not a one-to-one replacement for of_iomap() because the
+ * of_iomap() function does not track whether the region is already mapped. If
+ * two drivers try to map the same memory, the of_iomap() function will succeed
+ * but the devm_of_iomap() function will return -EBUSY.
+ *
+ * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure.
*/
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
resource_size_t *size)
@@ -220,13 +270,16 @@ static int devm_ioport_map_match(struct device *dev, void *res,
*
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
+ *
+ * Return: a pointer to the remapped memory or NULL on failure.
*/
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
- ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
if (!ptr)
return NULL;
@@ -262,7 +315,7 @@ EXPORT_SYMBOL(devm_ioport_unmap);
/*
* PCI iomap devres
*/
-#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
struct pcim_iomap_devres {
void __iomem *table[PCIM_IOMAP_MAX];
@@ -289,7 +342,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* detach.
*
* This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
+ * be safely called without context and guaranteed to succeed once
* allocated.
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
@@ -300,7 +353,8 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
if (dr)
return dr->table;
- new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!new_dr)
return NULL;
dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
@@ -462,3 +516,87 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
+
+static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
+{
+ arch_phys_wc_del(*((int *)res));
+}
+
+/**
+ * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
+ * @dev: Managed device
+ * @base: Memory base address
+ * @size: Size of memory range
+ *
+ * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
+ * See arch_phys_wc_add() for more information.
+ */
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
+{
+ int *mtrr;
+ int ret;
+
+ mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!mtrr)
+ return -ENOMEM;
+
+ ret = arch_phys_wc_add(base, size);
+ if (ret < 0) {
+ devres_free(mtrr);
+ return ret;
+ }
+
+ *mtrr = ret;
+ devres_add(dev, mtrr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_phys_wc_add);
+
+struct arch_io_reserve_memtype_wc_devres {
+ resource_size_t start;
+ resource_size_t size;
+};
+
+static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
+{
+ const struct arch_io_reserve_memtype_wc_devres *this = res;
+
+ arch_io_free_memtype_wc(this->start, this->size);
+}
+
+/**
+ * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
+ * @dev: Managed device
+ * @start: Memory base address
+ * @size: Size of memory range
+ *
+ * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
+ * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
+ * information.
+ */
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct arch_io_reserve_memtype_wc_devres *dr;
+ int ret;
+
+ dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!dr)
+ return -ENOMEM;
+
+ ret = arch_io_reserve_memtype_wc(start, size);
+ if (ret < 0) {
+ devres_free(dr);
+ return ret;
+ }
+
+ dr->start = start;
+ dr->size = size;
+ devres_add(dev, dr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
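
A hedged usage sketch for the two new managed helpers: a PCI driver marking its framebuffer BAR write-combining for the lifetime of the binding. The driver name, BAR index and error policy are illustrative; ignoring a failed MTRR addition mirrors common practice, since write-combining is an optimization rather than a requirement.

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical driver: names and the use of BAR 0 are illustrative. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	resource_size_t base = pci_resource_start(pdev, 0);
	resource_size_t size = pci_resource_len(pdev, 0);
	int ret;

	ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, base, size);
	if (ret)
		return ret;

	/* MTRR setup is best effort; failure is not fatal. */
	devm_arch_phys_wc_add(&pdev->dev, base, size);

	/* Both reservations are released automatically on driver detach. */
	return 0;
}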
diff --git a/lib/dhry.h b/lib/dhry.h
new file mode 100644
index 000000000000..e1a4db8e252c
--- /dev/null
+++ b/lib/dhry.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry.h (part 1 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ * Siemens AG, AUT E 51
+ * Postfach 3220
+ * 8520 Erlangen
+ * Germany (West)
+ * Phone: [+49]-9131-7-20330
+ * (8-17 Central European Time)
+ * Usenet: ..!mcsun!unido!estevax!weicker
+ *
+ * Original Version (in Ada) published in
+ * "Communications of the ACM" vol. 27., no. 10 (Oct. 1984),
+ * pp. 1013 - 1030, together with the statistics
+ * on which the distribution of statements etc. is based.
+ *
+ * In this C version, the following C library functions are used:
+ * - strcpy, strcmp (inside the measurement loop)
+ * - printf, scanf (outside the measurement loop)
+ * In addition, Berkeley UNIX system calls "times ()" or "time ()"
+ * are used for execution time measurement. For measurements
+ * on other systems, these calls have to be changed.
+ *
+ * Collection of Results:
+ * Reinhold Weicker (address see above) and
+ *
+ * Rick Richardson
+ * PC Research. Inc.
+ * 94 Apple Orchard Drive
+ * Tinton Falls, NJ 07724
+ * Phone: (201) 389-8963 (9-17 EST)
+ * Usenet: ...!uunet!pcrat!rick
+ *
+ * Please send results to Rick Richardson and/or Reinhold Weicker.
+ * Complete information should be given on hardware and software used.
+ * Hardware information includes: Machine type, CPU, type and size
+ * of caches; for microprocessors: clock frequency, memory speed
+ * (number of wait states).
+ * Software information includes: Compiler (and runtime library)
+ * manufacturer and version, compilation switches, OS version.
+ * The Operating System version may give an indication about the
+ * compiler; Dhrystone itself performs no OS calls in the measurement loop.
+ *
+ * The complete output generated by the program should be mailed
+ * such that at least some checks for correctness can be made.
+ *
+ ***************************************************************************
+ *
+ * History: This version C/2.1 has been made for two reasons:
+ *
+ * 1) There is an obvious need for a common C version of
+ * Dhrystone, since C is at present the most popular system
+ * programming language for the class of processors
+ * (microcomputers, minicomputers) where Dhrystone is used most.
+ * There should be, as far as possible, only one C version of
+ * Dhrystone such that results can be compared without
+ * restrictions. In the past, the C versions distributed
+ * by Rick Richardson (Version 1.1) and by Reinhold Weicker
+ * had small (though not significant) differences.
+ *
+ * 2) As far as it is possible without changes to the Dhrystone
+ * statistics, optimizing compilers should be prevented from
+ * removing significant statements.
+ *
+ * This C version has been developed in cooperation with
+ * Rick Richardson (Tinton Falls, NJ), it incorporates many
+ * ideas from the "Version 1.1" distributed previously by
+ * him over the UNIX network Usenet.
+ * I also thank Chaim Benedelac (National Semiconductor),
+ * David Ditzel (SUN), Earl Killian and John Mashey (MIPS),
+ * Alan Smith and Rafael Saavedra-Barrera (UC at Berkeley)
+ * for their help with comments on earlier versions of the
+ * benchmark.
+ *
+ * Changes: In the initialization part, this version follows mostly
+ * Rick Richardson's version distributed via Usenet, not the
+ * version distributed earlier via floppy disk by Reinhold Weicker.
+ * As a concession to older compilers, names have been made
+ * unique within the first 8 characters.
+ * Inside the measurement loop, this version follows the
+ * version previously distributed by Reinhold Weicker.
+ *
+ * At several places in the benchmark, code has been added,
+ * but within the measurement loop only in branches that
+ * are not executed. The intention is that optimizing compilers
+ * should be prevented from moving code out of the measurement
+ * loop, or from removing code altogether. Since the statements
+ * that are executed within the measurement loop have NOT been
+ * changed, the numbers defining the "Dhrystone distribution"
+ * (distribution of statements, operand types and locality)
+ * still hold. Except for sophisticated optimizing compilers,
+ * execution times for this version should be the same as
+ * for previous versions.
+ *
+ * Since it has proven difficult to subtract the time for the
+ * measurement loop overhead in a correct way, the loop check
+ * has been made a part of the benchmark. This does have
+ * an impact - though a very minor one - on the distribution
+ * statistics which have been updated for this version.
+ *
+ * All changes within the measurement loop are described
+ * and discussed in the companion paper "Rationale for
+ * Dhrystone version 2".
+ *
+ * Because of the self-imposed limitation that the order and
+ * distribution of the executed statements should not be
+ * changed, there are still cases where optimizing compilers
+ * may not generate code for some statements. To a certain
+ * degree, this is unavoidable for small synthetic benchmarks.
+ * Users of the benchmark are advised to check code listings
+ * whether code is generated for all statements of Dhrystone.
+ *
+ * Version 2.1 is identical to version 2.0 distributed via
+ * the UNIX network Usenet in March 1988 except that it corrects
+ * some minor deficiencies that were found by users of version 2.0.
+ * The only change within the measurement loop is that a
+ * non-executed "else" part was added to the "if" statement in
+ * Func_3, and a non-executed "else" part removed from Proc_3.
+ *
+ ***************************************************************************
+ *
+ * Compilation model and measurement (IMPORTANT):
+ *
+ * This C version of Dhrystone consists of three files:
+ * - dhry.h (this file, containing global definitions and comments)
+ * - dhry_1.c (containing the code corresponding to Ada package Pack_1)
+ * - dhry_2.c (containing the code corresponding to Ada package Pack_2)
+ *
+ * The following "ground rules" apply for measurements:
+ * - Separate compilation
+ * - No procedure merging
+ * - Otherwise, compiler optimizations are allowed but should be indicated
+ * - Default results are those without register declarations
+ * See the companion paper "Rationale for Dhrystone Version 2" for a more
+ * detailed discussion of these ground rules.
+ *
+ * For 16-Bit processors (e.g. 80186, 80286), times for all compilation
+ * models ("small", "medium", "large" etc.) should be given if possible,
+ * together with a definition of these models for the compiler system used.
+ *
+ **************************************************************************
+ *
+ * Dhrystone (C version) statistics:
+ *
+ * [Comment from the first distribution, updated for version 2.
+ * Note that because of language differences, the numbers are slightly
+ * different from the Ada version.]
+ *
+ * The following program contains statements of a high level programming
+ * language (here: C) in a distribution considered representative:
+ *
+ * assignments 52 (51.0 %)
+ * control statements 33 (32.4 %)
+ * procedure, function calls 17 (16.7 %)
+ *
+ * 103 statements are dynamically executed. The program is balanced with
+ * respect to the three aspects:
+ *
+ * - statement type
+ * - operand type
+ * - operand locality
+ * operand global, local, parameter, or constant.
+ *
+ * The combination of these three aspects is balanced only approximately.
+ *
+ * 1. Statement Type:
+ * ----------------- number
+ *
+ * V1 = V2 9
+ * (incl. V1 = F(..)
+ * V = Constant 12
+ * Assignment, 7
+ * with array element
+ * Assignment, 6
+ * with record component
+ * --
+ * 34 34
+ *
+ * X = Y +|-|"&&"|"|" Z 5
+ * X = Y +|-|"==" Constant 6
+ * X = X +|- 1 3
+ * X = Y *|/ Z 2
+ * X = Expression, 1
+ * two operators
+ * X = Expression, 1
+ * three operators
+ * --
+ * 18 18
+ *
+ * if .... 14
+ * with "else" 7
+ * without "else" 7
+ * executed 3
+ * not executed 4
+ * for ... 7 | counted every time
+ * while ... 4 | the loop condition
+ * do ... while 1 | is evaluated
+ * switch ... 1
+ * break 1
+ * declaration with 1
+ * initialization
+ * --
+ * 34 34
+ *
+ * P (...) procedure call 11
+ * user procedure 10
+ * library procedure 1
+ * X = F (...)
+ * function call 6
+ * user function 5
+ * library function 1
+ * --
+ * 17 17
+ * ---
+ * 103
+ *
+ * The average number of parameters in procedure or function calls
+ * is 1.82 (not counting the function values as implicit parameters).
+ *
+ *
+ * 2. Operators
+ * ------------
+ * number approximate
+ * percentage
+ *
+ * Arithmetic 32 50.8
+ *
+ * + 21 33.3
+ * - 7 11.1
+ * * 3 4.8
+ * / (int div) 1 1.6
+ *
+ * Comparison 27 42.8
+ *
+ * == 9 14.3
+ * /= 4 6.3
+ * > 1 1.6
+ * < 3 4.8
+ * >= 1 1.6
+ * <= 9 14.3
+ *
+ * Logic 4 6.3
+ *
+ * && (AND-THEN) 1 1.6
+ * | (OR) 1 1.6
+ * ! (NOT) 2 3.2
+ *
+ * -- -----
+ * 63 100.1
+ *
+ *
+ * 3. Operand Type (counted once per operand reference):
+ * ---------------
+ * number approximate
+ * percentage
+ *
+ * Integer 175 72.3 %
+ * Character 45 18.6 %
+ * Pointer 12 5.0 %
+ * String30 6 2.5 %
+ * Array 2 0.8 %
+ * Record 2 0.8 %
+ * --- -------
+ * 242 100.0 %
+ *
+ * When there is an access path leading to the final operand (e.g. a record
+ * component), only the final data type on the access path is counted.
+ *
+ *
+ * 4. Operand Locality:
+ * -------------------
+ * number approximate
+ * percentage
+ *
+ * local variable 114 47.1 %
+ * global variable 22 9.1 %
+ * parameter 45 18.6 %
+ * value 23 9.5 %
+ * reference 22 9.1 %
+ * function result 6 2.5 %
+ * constant 55 22.7 %
+ * --- -------
+ * 242 100.0 %
+ *
+ *
+ * The program does not compute anything meaningful, but it is syntactically
+ * and semantically correct. All variables have a value assigned to them
+ * before they are used as a source operand.
+ *
+ * There has been no explicit effort to account for the effects of a
+ * cache, or to balance the use of long or short displacements for code or
+ * data.
+ *
+ ***************************************************************************
+ */
+
+typedef enum {
+ Ident_1,
+ Ident_2,
+ Ident_3,
+ Ident_4,
+ Ident_5
+} Enumeration; /* for boolean and enumeration types in Ada, Pascal */
+
+/* General definitions: */
+
+typedef int One_Thirty;
+typedef int One_Fifty;
+typedef char Capital_Letter;
+typedef int Boolean;
+typedef char Str_30[31];
+typedef int Arr_1_Dim[50];
+typedef int Arr_2_Dim[50][50];
+
+typedef struct record {
+ struct record *Ptr_Comp;
+ Enumeration Discr;
+ union {
+ struct {
+ Enumeration Enum_Comp;
+ int Int_Comp;
+ char Str_Comp[31];
+ } var_1;
+ struct {
+ Enumeration E_Comp_2;
+ char Str_2_Comp[31];
+ } var_2;
+ struct {
+ char Ch_1_Comp;
+ char Ch_2_Comp;
+ } var_3;
+ } variant;
+} Rec_Type, *Rec_Pointer;
+
+
+extern int Int_Glob;
+extern char Ch_1_Glob;
+
+void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par);
+void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val,
+ One_Fifty *Int_Par_Ref);
+void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref,
+ int Int_1_Par_Val, int Int_2_Par_Val);
+Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val);
+Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref);
+
+int dhry(int n);
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
new file mode 100644
index 000000000000..08edbbb19f57
--- /dev/null
+++ b/lib/dhry_1.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry_1.c (part 2 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ *
+ ****************************************************************************
+ */
+
+#include "dhry.h"
+
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/* Global Variables: */
+
+int Int_Glob;
+char Ch_1_Glob;
+
+static Rec_Pointer Ptr_Glob, Next_Ptr_Glob;
+static Boolean Bool_Glob;
+static char Ch_2_Glob;
+static int Arr_1_Glob[50];
+static int Arr_2_Glob[50][50];
+
+static void Proc_3(Rec_Pointer *Ptr_Ref_Par)
+/******************/
+/* executed once */
+/* Ptr_Ref_Par becomes Ptr_Glob */
+{
+ if (Ptr_Glob) {
+ /* then, executed */
+ *Ptr_Ref_Par = Ptr_Glob->Ptr_Comp;
+ }
+ Proc_7(10, Int_Glob, &Ptr_Glob->variant.var_1.Int_Comp);
+} /* Proc_3 */
+
+
+static void Proc_1(Rec_Pointer Ptr_Val_Par)
+/******************/
+/* executed once */
+{
+ Rec_Pointer Next_Record = Ptr_Val_Par->Ptr_Comp;
+ /* == Ptr_Glob_Next */
+ /* Local variable, initialized with Ptr_Val_Par->Ptr_Comp, */
+ /* corresponds to "rename" in Ada, "with" in Pascal */
+
+ *Ptr_Val_Par->Ptr_Comp = *Ptr_Glob;
+ Ptr_Val_Par->variant.var_1.Int_Comp = 5;
+ Next_Record->variant.var_1.Int_Comp =
+ Ptr_Val_Par->variant.var_1.Int_Comp;
+ Next_Record->Ptr_Comp = Ptr_Val_Par->Ptr_Comp;
+ Proc_3(&Next_Record->Ptr_Comp);
+ /* Ptr_Val_Par->Ptr_Comp->Ptr_Comp == Ptr_Glob->Ptr_Comp */
+ if (Next_Record->Discr == Ident_1) {
+ /* then, executed */
+ Next_Record->variant.var_1.Int_Comp = 6;
+ Proc_6(Ptr_Val_Par->variant.var_1.Enum_Comp,
+ &Next_Record->variant.var_1.Enum_Comp);
+ Next_Record->Ptr_Comp = Ptr_Glob->Ptr_Comp;
+ Proc_7(Next_Record->variant.var_1.Int_Comp, 10,
+ &Next_Record->variant.var_1.Int_Comp);
+ } else {
+ /* not executed */
+ *Ptr_Val_Par = *Ptr_Val_Par->Ptr_Comp;
+ }
+} /* Proc_1 */
+
+
+static void Proc_2(One_Fifty *Int_Par_Ref)
+/******************/
+/* executed once */
+/* *Int_Par_Ref == 1, becomes 4 */
+{
+ One_Fifty Int_Loc;
+ Enumeration Enum_Loc;
+
+ Int_Loc = *Int_Par_Ref + 10;
+ do {
+ /* executed once */
+ if (Ch_1_Glob == 'A') {
+ /* then, executed */
+ Int_Loc -= 1;
+ *Int_Par_Ref = Int_Loc - Int_Glob;
+ Enum_Loc = Ident_1;
+ } /* if */
+ } while (Enum_Loc != Ident_1); /* true */
+} /* Proc_2 */
+
+
+static void Proc_4(void)
+/*******/
+/* executed once */
+{
+ Boolean Bool_Loc;
+
+ Bool_Loc = Ch_1_Glob == 'A';
+ Bool_Glob = Bool_Loc | Bool_Glob;
+ Ch_2_Glob = 'B';
+} /* Proc_4 */
+
+
+static void Proc_5(void)
+/*******/
+/* executed once */
+{
+ Ch_1_Glob = 'A';
+ Bool_Glob = false;
+} /* Proc_5 */
+
+
+int dhry(int n)
+/*****/
+
+ /* main program, corresponds to procedures */
+ /* Main and Proc_0 in the Ada version */
+{
+ One_Fifty Int_1_Loc;
+ One_Fifty Int_2_Loc;
+ One_Fifty Int_3_Loc;
+ char Ch_Index;
+ Enumeration Enum_Loc;
+ Str_30 Str_1_Loc;
+ Str_30 Str_2_Loc;
+ int Run_Index;
+ int Number_Of_Runs;
+ ktime_t Begin_Time, End_Time;
+ u32 User_Time;
+
+ /* Initializations */
+
+ Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Next_Ptr_Glob)
+ return -ENOMEM;
+
+ Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Ptr_Glob) {
+ kfree(Next_Ptr_Glob);
+ return -ENOMEM;
+ }
+
+ Ptr_Glob->Ptr_Comp = Next_Ptr_Glob;
+ Ptr_Glob->Discr = Ident_1;
+ Ptr_Glob->variant.var_1.Enum_Comp = Ident_3;
+ Ptr_Glob->variant.var_1.Int_Comp = 40;
+ strcpy(Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ strcpy(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
+
+ Arr_2_Glob[8][7] = 10;
+ /* Was missing in published program. Without this statement, */
+ /* Arr_2_Glob[8][7] would have an undefined value. */
+ /* Warning: With 16-Bit processors and Number_Of_Runs > 32000, */
+ /* overflow may occur for this array element. */
+
+ pr_debug("Dhrystone Benchmark, Version 2.1 (Language: C)\n");
+
+ Number_Of_Runs = n;
+
+ pr_debug("Execution starts, %d runs through Dhrystone\n",
+ Number_Of_Runs);
+
+ /***************/
+ /* Start timer */
+ /***************/
+
+ Begin_Time = ktime_get();
+
+ for (Run_Index = 1; Run_Index <= Number_Of_Runs; ++Run_Index) {
+ Proc_5();
+ Proc_4();
+ /* Ch_1_Glob == 'A', Ch_2_Glob == 'B', Bool_Glob == true */
+ Int_1_Loc = 2;
+ Int_2_Loc = 3;
+ strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
+ Enum_Loc = Ident_2;
+ Bool_Glob = !Func_2(Str_1_Loc, Str_2_Loc);
+ /* Bool_Glob == 1 */
+ while (Int_1_Loc < Int_2_Loc) {
+ /* loop body executed once */
+ Int_3_Loc = 5 * Int_1_Loc - Int_2_Loc;
+ /* Int_3_Loc == 7 */
+ Proc_7(Int_1_Loc, Int_2_Loc, &Int_3_Loc);
+ /* Int_3_Loc == 7 */
+ Int_1_Loc += 1;
+ } /* while */
+ /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */
+ Proc_8(Arr_1_Glob, Arr_2_Glob, Int_1_Loc, Int_3_Loc);
+ /* Int_Glob == 5 */
+ Proc_1(Ptr_Glob);
+ for (Ch_Index = 'A'; Ch_Index <= Ch_2_Glob; ++Ch_Index) {
+ /* loop body executed twice */
+ if (Enum_Loc == Func_1(Ch_Index, 'C')) {
+ /* then, not executed */
+ Proc_6(Ident_1, &Enum_Loc);
+ strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 3'RD STRING");
+ Int_2_Loc = Run_Index;
+ Int_Glob = Run_Index;
+ }
+ }
+ /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */
+ Int_2_Loc = Int_2_Loc * Int_1_Loc;
+ Int_1_Loc = Int_2_Loc / Int_3_Loc;
+ Int_2_Loc = 7 * (Int_2_Loc - Int_3_Loc) - Int_1_Loc;
+ /* Int_1_Loc == 1, Int_2_Loc == 13, Int_3_Loc == 7 */
+ Proc_2(&Int_1_Loc);
+ /* Int_1_Loc == 5 */
+
+ } /* loop "for Run_Index" */
+
+ /**************/
+ /* Stop timer */
+ /**************/
+
+ End_Time = ktime_get();
+
+#define dhry_assert_int_eq(val, expected) \
+ if (val != expected) \
+ pr_err("%s: %d (FAIL, expected %d)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %d (OK)\n", #val, val)
+
+#define dhry_assert_char_eq(val, expected) \
+ if (val != expected) \
+ pr_err("%s: %c (FAIL, expected %c)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %c (OK)\n", #val, val)
+
+#define dhry_assert_string_eq(val, expected) \
+ if (strcmp(val, expected)) \
+ pr_err("%s: %s (FAIL, expected %s)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %s (OK)\n", #val, val)
+
+ pr_debug("Execution ends\n");
+ pr_debug("Final values of the variables used in the benchmark:\n");
+ dhry_assert_int_eq(Int_Glob, 5);
+ dhry_assert_int_eq(Bool_Glob, 1);
+ dhry_assert_char_eq(Ch_1_Glob, 'A');
+ dhry_assert_char_eq(Ch_2_Glob, 'B');
+ dhry_assert_int_eq(Arr_1_Glob[8], 7);
+ dhry_assert_int_eq(Arr_2_Glob[8][7], Number_Of_Runs + 10);
+ pr_debug("Ptr_Comp: %px\n", Ptr_Glob->Ptr_Comp);
+ dhry_assert_int_eq(Ptr_Glob->Discr, 0);
+ dhry_assert_int_eq(Ptr_Glob->variant.var_1.Enum_Comp, 2);
+ dhry_assert_int_eq(Ptr_Glob->variant.var_1.Int_Comp, 17);
+ dhry_assert_string_eq(Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ if (Next_Ptr_Glob->Ptr_Comp != Ptr_Glob->Ptr_Comp)
+ pr_err("Next_Ptr_Glob->Ptr_Comp: %px (expected %px)\n",
+ Next_Ptr_Glob->Ptr_Comp, Ptr_Glob->Ptr_Comp);
+ else
+ pr_debug("Next_Ptr_Glob->Ptr_Comp: %px\n",
+ Next_Ptr_Glob->Ptr_Comp);
+ dhry_assert_int_eq(Next_Ptr_Glob->Discr, 0);
+ dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Enum_Comp, 1);
+ dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Int_Comp, 18);
+ dhry_assert_string_eq(Next_Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ dhry_assert_int_eq(Int_1_Loc, 5);
+ dhry_assert_int_eq(Int_2_Loc, 13);
+ dhry_assert_int_eq(Int_3_Loc, 7);
+ dhry_assert_int_eq(Enum_Loc, 1);
+ dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
+ dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
+
+ User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+
+ kfree(Ptr_Glob);
+ kfree(Next_Ptr_Glob);
+
+ /* Measurements should last at least 2 seconds */
+ if (User_Time < 2 * MSEC_PER_SEC)
+ return -EAGAIN;
+
+ return div_u64(mul_u32_u32(MSEC_PER_SEC, Number_Of_Runs), User_Time);
+}
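To make the return value concrete: dhry() reports MSEC_PER_SEC * Number_Of_Runs / User_Time, i.e. Dhrystones per second. For example, 3,000,000 iterations completing in 2,100 ms yield 1000 * 3000000 / 2100, about 1,428,571 Dhrystones per second; a run that finishes in under two seconds returns -EAGAIN so the caller can retry with a larger iteration count.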
diff --git a/lib/dhry_2.c b/lib/dhry_2.c
new file mode 100644
index 000000000000..c19e661f37d3
--- /dev/null
+++ b/lib/dhry_2.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry_2.c (part 3 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ *
+ ****************************************************************************
+ */
+
+#include "dhry.h"
+
+#include <linux/string.h>
+
+
+static Boolean Func_3(Enumeration Enum_Par_Val)
+/***************************/
+/* executed once */
+/* Enum_Par_Val == Ident_3 */
+{
+ Enumeration Enum_Loc;
+
+ Enum_Loc = Enum_Par_Val;
+ if (Enum_Loc == Ident_3) {
+ /* then, executed */
+ return true;
+ } else {
+ /* not executed */
+ return false;
+ }
+} /* Func_3 */
+
+
+void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par)
+/*********************************/
+/* executed once */
+/* Enum_Val_Par == Ident_3, Enum_Ref_Par becomes Ident_2 */
+{
+ *Enum_Ref_Par = Enum_Val_Par;
+ if (!Func_3(Enum_Val_Par)) {
+ /* then, not executed */
+ *Enum_Ref_Par = Ident_4;
+ }
+ switch (Enum_Val_Par) {
+ case Ident_1:
+ *Enum_Ref_Par = Ident_1;
+ break;
+ case Ident_2:
+ if (Int_Glob > 100) {
+ /* then */
+ *Enum_Ref_Par = Ident_1;
+ } else {
+ *Enum_Ref_Par = Ident_4;
+ }
+ break;
+ case Ident_3: /* executed */
+ *Enum_Ref_Par = Ident_2;
+ break;
+ case Ident_4:
+ break;
+ case Ident_5:
+ *Enum_Ref_Par = Ident_3;
+ break;
+ } /* switch */
+} /* Proc_6 */
+
+
+void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val, One_Fifty *Int_Par_Ref)
+/**********************************************/
+/* executed three times */
+/* first call: Int_1_Par_Val == 2, Int_2_Par_Val == 3, */
+/* Int_Par_Ref becomes 7 */
+/* second call: Int_1_Par_Val == 10, Int_2_Par_Val == 5, */
+/* Int_Par_Ref becomes 17 */
+/* third call: Int_1_Par_Val == 6, Int_2_Par_Val == 10, */
+/* Int_Par_Ref becomes 18 */
+{
+ One_Fifty Int_Loc;
+
+ Int_Loc = Int_1_Par_Val + 2;
+ *Int_Par_Ref = Int_2_Par_Val + Int_Loc;
+} /* Proc_7 */
+
+
+void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref, int Int_1_Par_Val, int Int_2_Par_Val)
+/*********************************************************************/
+/* executed once */
+/* Int_Par_Val_1 == 3 */
+/* Int_Par_Val_2 == 7 */
+{
+ One_Fifty Int_Index;
+ One_Fifty Int_Loc;
+
+ Int_Loc = Int_1_Par_Val + 5;
+ Arr_1_Par_Ref[Int_Loc] = Int_2_Par_Val;
+ Arr_1_Par_Ref[Int_Loc+1] = Arr_1_Par_Ref[Int_Loc];
+ Arr_1_Par_Ref[Int_Loc+30] = Int_Loc;
+ for (Int_Index = Int_Loc; Int_Index <= Int_Loc+1; ++Int_Index)
+ Arr_2_Par_Ref[Int_Loc][Int_Index] = Int_Loc;
+ Arr_2_Par_Ref[Int_Loc][Int_Loc-1] += 1;
+ Arr_2_Par_Ref[Int_Loc+20][Int_Loc] = Arr_1_Par_Ref[Int_Loc];
+ Int_Glob = 5;
+} /* Proc_8 */
+
+
+Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val)
+/*************************************************/
+/* executed three times */
+/* first call: Ch_1_Par_Val == 'H', Ch_2_Par_Val == 'R' */
+/* second call: Ch_1_Par_Val == 'A', Ch_2_Par_Val == 'C' */
+/* third call: Ch_1_Par_Val == 'B', Ch_2_Par_Val == 'C' */
+{
+ Capital_Letter Ch_1_Loc;
+ Capital_Letter Ch_2_Loc;
+
+ Ch_1_Loc = Ch_1_Par_Val;
+ Ch_2_Loc = Ch_1_Loc;
+ if (Ch_2_Loc != Ch_2_Par_Val) {
+ /* then, executed */
+ return Ident_1;
+ } else {
+ /* not executed */
+ Ch_1_Glob = Ch_1_Loc;
+ return Ident_2;
+ }
+} /* Func_1 */
+
+
+Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref)
+/*************************************************/
+/* executed once */
+/* Str_1_Par_Ref == "DHRYSTONE PROGRAM, 1'ST STRING" */
+/* Str_2_Par_Ref == "DHRYSTONE PROGRAM, 2'ND STRING" */
+{
+ One_Thirty Int_Loc;
+ Capital_Letter Ch_Loc;
+
+ Int_Loc = 2;
+ while (Int_Loc <= 2) {
+ /* loop body executed once */
+ if (Func_1(Str_1_Par_Ref[Int_Loc],
+ Str_2_Par_Ref[Int_Loc+1]) == Ident_1) {
+ /* then, executed */
+ Ch_Loc = 'A';
+ Int_Loc += 1;
+ }
+ } /* if, while */
+ if (Ch_Loc >= 'W' && Ch_Loc < 'Z') {
+ /* then, not executed */
+ Int_Loc = 7;
+ }
+ if (Ch_Loc == 'R') {
+ /* then, not executed */
+ return true;
+ } else {
+ /* executed */
+ if (strcmp(Str_1_Par_Ref, Str_2_Par_Ref) > 0) {
+ /* then, not executed */
+ Int_Loc += 7;
+ Int_Glob = Int_Loc;
+ return true;
+ } else {
+ /* executed */
+ return false;
+ }
+ } /* if Ch_Loc */
+} /* Func_2 */
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
new file mode 100644
index 000000000000..f15ac666e9d3
--- /dev/null
+++ b/lib/dhry_run.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Dhrystone benchmark test module
+ *
+ * Copyright (C) 2022 Glider bv
+ */
+
+#include "dhry.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+
+#define DHRY_VAX 1757
+
+static int dhry_run_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops run_ops = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = dhry_run_set,
+};
+static bool dhry_run;
+module_param_cb(run, &run_ops, &dhry_run, 0200);
+MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+static int iterations = -1;
+module_param(iterations, int, 0644);
+MODULE_PARM_DESC(iterations,
+ "Number of iterations through the benchmark (default: auto)");
+
+static void dhry_benchmark(void)
+{
+ unsigned int cpu = get_cpu();
+ int i, n;
+
+ if (iterations > 0) {
+ n = dhry(iterations);
+ goto report;
+ }
+
+ for (i = DHRY_VAX; i > 0; i <<= 1) {
+ n = dhry(i);
+ if (n != -EAGAIN)
+ break;
+ }
+
+report:
+ put_cpu();
+ if (n >= 0)
+ pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
+ n, n / DHRY_VAX);
+ else if (n == -EAGAIN)
+ pr_err("Please increase the number of iterations\n");
+ else
+ pr_err("Dhrystone benchmark failed error %pe\n", ERR_PTR(n));
+}
+
+static int dhry_run_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ if (val) {
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+ } else {
+ dhry_run = true;
+ }
+
+ if (dhry_run && system_state == SYSTEM_RUNNING)
+ dhry_benchmark();
+
+ return 0;
+}
+
+static int __init dhry_init(void)
+{
+ if (dhry_run)
+ dhry_benchmark();
+
+ return 0;
+}
+module_init(dhry_init);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL");
diff --git a/lib/digsig.c b/lib/digsig.c
index 3cf89c775ab2..04b5e55ed95f 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -20,7 +20,7 @@
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>
@@ -218,7 +218,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
/* search in specific keyring */
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1UL),
- &key_type_user, name);
+ &key_type_user, name, true);
if (IS_ERR(kref))
key = ERR_CAST(kref);
else
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
new file mode 100644
index 000000000000..1d6858a108cb
--- /dev/null
+++ b/lib/dim/Makefile
@@ -0,0 +1,7 @@
+#
+# DIM Dynamic Interrupt Moderation library
+#
+
+obj-$(CONFIG_DIMLIB) += dimlib.o
+
+dimlib-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
new file mode 100644
index 000000000000..e89aaf07bde5
--- /dev/null
+++ b/lib/dim/dim.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+bool dim_on_top(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ return true;
+ case DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+EXPORT_SYMBOL(dim_on_top);
+
+void dim_turn(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ dim->tune_state = DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case DIM_GOING_LEFT:
+ dim->tune_state = DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(dim_turn);
+
+void dim_park_on_top(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = DIM_PARKING_ON_TOP;
+}
+EXPORT_SYMBOL(dim_park_on_top);
+
+void dim_park_tired(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = DIM_PARKING_TIRED;
+}
+EXPORT_SYMBOL(dim_park_tired);
+
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+ u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
+ start->comp_ctr);
+
+ if (!delta_us)
+ return false;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+ curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
+ if (curr_stats->epms != 0)
+ curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
+ curr_stats->cpms * 100, curr_stats->epms);
+ else
+ curr_stats->cpe_ratio = 0;
+
+ return true;
+}
+EXPORT_SYMBOL(dim_calc_stats);
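A small worked example of what dim_calc_stats() produces, with made-up numbers: if two samples are 10,000 us apart and their packet counters differ by 5,000, then ppms = DIV_ROUND_UP(5000 * 1000, 10000) = 500 packets per millisecond; bpms and cpms are derived the same way from the byte and completion counters, while epms is based on the fixed DIM_NEVENTS per measurement window. cpe_ratio, the completion-to-event ratio scaled by 100, is what the RDMA variant further below keys off.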
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000000..4e32f7aaac86
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+/*
+ * Net DIM profiles:
+ * There is a different set of profiles for each CQ period mode.
+ * There is a different set of profiles for RX and TX CQs.
+ * Each profile array must have NET_DIM_PARAMS_NUM_PROFILES entries.
+ */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+#define NET_DIM_RX_EQE_PROFILES { \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
+}
+
+#define NET_DIM_RX_CQE_PROFILES { \
+ {.usec = 2, .pkts = 256,}, \
+ {.usec = 8, .pkts = 128,}, \
+ {.usec = 16, .pkts = 64,}, \
+ {.usec = 32, .pkts = 64,}, \
+ {.usec = 64, .pkts = 64,} \
+}
+
+#define NET_DIM_TX_EQE_PROFILES { \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {.usec = 5, .pkts = 128,}, \
+ {.usec = 8, .pkts = 64,}, \
+ {.usec = 16, .pkts = 32,}, \
+ {.usec = 32, .pkts = 32,}, \
+ {.usec = 64, .pkts = 32,} \
+}
+
+static const struct dim_cq_moder
+rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
+};
+
+static const struct dim_cq_moder
+tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
+struct dim_cq_moder
+net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_rx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_tx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_tx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+
+static int net_dim_step(struct dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return DIM_STEPPED;
+}
+
+static void net_dim_exit_parking(struct dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+static int net_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? DIM_STATS_BETTER :
+ DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_GOING_RIGHT:
+ case DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_BETTER)
+ dim_turn(dim);
+
+ if (dim_on_top(dim)) {
+ dim_park_on_top(dim);
+ break;
+ }
+
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case DIM_ON_EDGE:
+ dim_park_on_top(dim);
+ break;
+ case DIM_TOO_TIRED:
+ dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if (prev_state != DIM_PARKING_ON_TOP ||
+ dim->tune_state != DIM_PARKING_ON_TOP)
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+void net_dim(struct dim *dim, struct dim_sample end_sample)
+{
+ struct dim_stats curr_stats;
+ u16 nevents;
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ end_sample.event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < DIM_NEVENTS)
+ break;
+ if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
+ break;
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ fallthrough;
+ case DIM_START_MEASURE:
+ dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
+ end_sample.byte_ctr, &dim->start_sample);
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(net_dim);
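To show where these entry points sit, a rough sketch of the usual driver-side pattern; my_ring, my_dim_work() and my_napi_update_dim() are illustrative names, and the driver is assumed to have done INIT_WORK(&ring->dim.work, my_dim_work) and chosen a cq_period_mode at ring setup:

#include <linux/dim.h>

struct my_ring {
	struct dim dim;		/* per-ring DIM state machine */
	u8 cq_period_mode;	/* DIM_CQ_PERIOD_MODE_START_FROM_EQE or _CQE */
};

/* work item that net_dim() schedules when a new profile should be applied */
static void my_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct my_ring *ring = container_of(dim, struct my_ring, dim);
	struct dim_cq_moder cur;

	cur = net_dim_get_rx_moderation(ring->cq_period_mode, dim->profile_ix);
	/* program cur.usec / cur.pkts into the NIC's coalescing registers here */
	dim->state = DIM_START_MEASURE;
}

/* called at the end of each NAPI poll with the ring's running counters */
static void my_napi_update_dim(struct my_ring *ring, u16 events,
			       u64 packets, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(&ring->dim, sample);
}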
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
new file mode 100644
index 000000000000..88f779486707
--- /dev/null
+++ b/lib/dim/rdma_dim.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+static int rdma_dim_step(struct dim *dim)
+{
+ if (dim->tune_state == DIM_GOING_RIGHT) {
+ if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ }
+ if (dim->tune_state == DIM_GOING_LEFT) {
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ }
+
+ return DIM_STEPPED;
+}
+
+static int rdma_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ /* first stat */
+ if (!prev->cpms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
+ return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
+ return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_ix = dim->profile_ix;
+ u8 state = dim->tune_state;
+ int stats_res;
+ int step_res;
+
+ if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
+ stats_res = rdma_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+
+ switch (stats_res) {
+ case DIM_STATS_SAME:
+ if (curr_stats->cpe_ratio <= 50 * prev_ix)
+ dim->profile_ix = 0;
+ break;
+ case DIM_STATS_WORSE:
+ dim_turn(dim);
+ fallthrough;
+ case DIM_STATS_BETTER:
+ step_res = rdma_dim_step(dim);
+ if (step_res == DIM_ON_EDGE)
+ dim_turn(dim);
+ break;
+ }
+ }
+
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+void rdma_dim(struct dim *dim, u64 completions)
+{
+ struct dim_sample *curr_sample = &dim->measuring_sample;
+ struct dim_stats curr_stats;
+ u32 nevents;
+
+ dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
+ curr_sample->comp_ctr + completions,
+ &dim->measuring_sample);
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ if (nevents < DIM_NEVENTS)
+ break;
+ if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
+ break;
+ if (rdma_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ fallthrough;
+ case DIM_START_MEASURE:
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
+ curr_sample->comp_ctr,
+ &dim->start_sample);
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(rdma_dim);
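The RDMA flavour is driven per completion queue rather than per NAPI poll; a condensed sketch, with my_cq and the actual CQ reprogramming left hypothetical:

#include <linux/dim.h>

struct my_cq {
	struct dim dim;
	/* provider-specific CQ state */
};

/* feed the number of completions reaped in one poll round */
static void my_cq_poll_done(struct my_cq *cq, int completions)
{
	rdma_dim(&cq->dim, completions);
}

/* scheduled by rdma_dim() when profile_ix has changed */
static void my_cq_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);

	/* map dim->profile_ix to a (usec, comps) pair and modify the CQ here */
	dim->state = DIM_START_MEASURE;
}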
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5cff72f18c4a..83471e81501a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/buildid.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -12,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/kexec.h>
#include <linux/utsname.h>
+#include <linux/stop_machine.h>
static char dump_stack_arch_desc_str[128];
@@ -35,6 +37,14 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
va_end(args);
}
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+#define BUILD_ID_FMT " %20phN"
+#define BUILD_ID_VAL vmlinux_build_id
+#else
+#define BUILD_ID_FMT "%s"
+#define BUILD_ID_VAL ""
+#endif
+
/**
* dump_stack_print_info - print generic debug info for dump_stack()
* @log_lvl: log level
@@ -44,19 +54,20 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(), current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version, BUILD_ID_VAL);
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
log_lvl, dump_stack_arch_desc_str);
print_worker_info(log_lvl, current);
+ print_stop_info(log_lvl, current);
}
/**
@@ -71,56 +82,34 @@ void show_regs_print_info(const char *log_lvl)
dump_stack_print_info(log_lvl);
}
-static void __dump_stack(void)
+static void __dump_stack(const char *log_lvl)
{
- dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL);
+ dump_stack_print_info(log_lvl);
+ show_stack(NULL, NULL, log_lvl);
}
/**
- * dump_stack - dump the current task information and its stack trace
+ * dump_stack_lvl - dump the current task information and its stack trace
+ * @log_lvl: log level
*
* Architectures can override this implementation by implementing its own.
*/
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
-asmlinkage __visible void dump_stack(void)
+asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
unsigned long flags;
- int was_locked;
- int old;
- int cpu;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
-retry:
- local_irq_save(flags);
- cpu = smp_processor_id();
- old = atomic_cmpxchg(&dump_lock, -1, cpu);
- if (old == -1) {
- was_locked = 0;
- } else if (old == cpu) {
- was_locked = 1;
- } else {
- local_irq_restore(flags);
- cpu_relax();
- goto retry;
- }
-
- __dump_stack();
-
- if (!was_locked)
- atomic_set(&dump_lock, -1);
-
- local_irq_restore(flags);
+ printk_cpu_sync_get_irqsave(flags);
+ __dump_stack(log_lvl);
+ printk_cpu_sync_put_irqrestore(flags);
}
-#else
+EXPORT_SYMBOL(dump_stack_lvl);
+
asmlinkage __visible void dump_stack(void)
{
- __dump_stack();
+ dump_stack_lvl(KERN_DEFAULT);
}
-#endif
EXPORT_SYMBOL(dump_stack);
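The practical difference after this change is that callers can choose the console severity of the backtrace; a minimal sketch, where my_report_stall() is an invented caller:

#include <linux/printk.h>

static void my_report_stall(void)
{
	/* emitted at KERN_WARNING instead of the previously fixed KERN_DEFAULT */
	dump_stack_lvl(KERN_WARNING);
}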
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..6fba6423cc10 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -11,7 +11,7 @@
* Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#define pr_fmt(fmt) "dyndbg: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -39,11 +39,13 @@
#include <rdma/ib_verbs.h>
-extern struct _ddebug __start___verbose[];
-extern struct _ddebug __stop___verbose[];
+extern struct _ddebug __start___dyndbg[];
+extern struct _ddebug __stop___dyndbg[];
+extern struct ddebug_class_map __start___dyndbg_classes[];
+extern struct ddebug_class_map __stop___dyndbg_classes[];
struct ddebug_table {
- struct list_head link;
+ struct list_head link, maps;
const char *mod_name;
unsigned int num_ddebugs;
struct _ddebug *ddebugs;
@@ -54,18 +56,26 @@ struct ddebug_query {
const char *module;
const char *function;
const char *format;
+ const char *class_string;
unsigned int first_lineno, last_lineno;
};
struct ddebug_iter {
struct ddebug_table *table;
- unsigned int idx;
+ int idx;
+};
+
+struct flag_settings {
+ unsigned int flags;
+ unsigned int mask;
};
static DEFINE_MUTEX(ddebug_lock);
static LIST_HEAD(ddebug_tables);
static int verbose;
module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, " dynamic_debug/control processing "
+ "( 0 = off (default), 1 = module add/rm, 2 = >control summary, 3 = parsing, 4 = per-site changes)");
/* Return the path relative to source root */
static inline const char *trim_prefix(const char *path)
@@ -78,39 +88,45 @@ static inline const char *trim_prefix(const char *path)
return path + skip;
}
-static struct { unsigned flag:8; char opt_char; } opt_array[] = {
+static const struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_PRINT, 'p' },
{ _DPRINTK_FLAGS_INCL_MODNAME, 'm' },
{ _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' },
+ { _DPRINTK_FLAGS_INCL_SOURCENAME, 's' },
{ _DPRINTK_FLAGS_INCL_LINENO, 'l' },
{ _DPRINTK_FLAGS_INCL_TID, 't' },
{ _DPRINTK_FLAGS_NONE, '_' },
};
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
/* format a string into buf[] which describes the _ddebug's flags */
-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
- size_t maxlen)
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
{
- char *p = buf;
+ char *p = fb->buf;
int i;
- BUG_ON(maxlen < 6);
for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
- if (dp->flags & opt_array[i].flag)
+ if (flags & opt_array[i].flag)
*p++ = opt_array[i].opt_char;
- if (p == buf)
+ if (p == fb->buf)
*p++ = '_';
*p = '\0';
- return buf;
+ return fb->buf;
}
-#define vpr_info(fmt, ...) \
+#define vnpr_info(lvl, fmt, ...) \
do { \
- if (verbose) \
+ if (verbose >= lvl) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
+#define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__)
+#define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__)
+#define v3pr_info(fmt, ...) vnpr_info(3, fmt, ##__VA_ARGS__)
+#define v4pr_info(fmt, ...) vnpr_info(4, fmt, ##__VA_ARGS__)
+
static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
{
/* trim any trailing newlines */
@@ -122,15 +138,33 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
fmtlen--;
}
- vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
- msg,
- query->function ? query->function : "",
- query->filename ? query->filename : "",
- query->module ? query->module : "",
- fmtlen, query->format ? query->format : "",
- query->first_lineno, query->last_lineno);
+ v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u class=%s\n",
+ msg,
+ query->function ?: "",
+ query->filename ?: "",
+ query->module ?: "",
+ fmtlen, query->format ?: "",
+ query->first_lineno, query->last_lineno, query->class_string);
+}
+
+static struct ddebug_class_map *ddebug_find_valid_class(struct ddebug_table const *dt,
+ const char *class_string, int *class_id)
+{
+ struct ddebug_class_map *map;
+ int idx;
+
+ list_for_each_entry(map, &dt->maps, link) {
+ idx = match_string(map->class_names, map->length, class_string);
+ if (idx >= 0) {
+ *class_id = idx + map->base;
+ return map;
+ }
+ }
+ *class_id = -ENOENT;
+ return NULL;
}
+#define __outvar /* filled by callee */
/*
* Search the tables for _ddebug's which match the given `query' and
* apply the `flags' and `mask' to them. Returns number of matching
@@ -138,13 +172,15 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
* logs the changes. Takes ddebug_lock.
*/
static int ddebug_change(const struct ddebug_query *query,
- unsigned int flags, unsigned int mask)
+ struct flag_settings *modifiers)
{
int i;
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- char flagbuf[10];
+ struct flagsbuf fbuf, nbuf;
+ struct ddebug_class_map *map = NULL;
+ int __outvar valid_class;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@ -155,9 +191,22 @@ static int ddebug_change(const struct ddebug_query *query,
!match_wildcard(query->module, dt->mod_name))
continue;
+ if (query->class_string) {
+ map = ddebug_find_valid_class(dt, query->class_string, &valid_class);
+ if (!map)
+ continue;
+ } else {
+ /* constrain query, do not touch class'd callsites */
+ valid_class = _DPRINTK_CLASS_DFLT;
+ }
+
for (i = 0; i < dt->num_ddebugs; i++) {
struct _ddebug *dp = &dt->ddebugs[i];
+ /* match site against query-class */
+ if (dp->class_id != valid_class)
+ continue;
+
/* match against the source filename */
if (query->filename &&
!match_wildcard(query->filename, dp->filename) &&
@@ -173,9 +222,16 @@ static int ddebug_change(const struct ddebug_query *query,
continue;
/* match against the format */
- if (query->format &&
- !strstr(dp->format, query->format))
- continue;
+ if (query->format) {
+ if (*query->format == '^') {
+ char *p;
+ /* anchored search. match must be at beginning */
+ p = strstr(dp->format, query->format+1);
+ if (p != dp->format)
+ continue;
+ } else if (!strstr(dp->format, query->format))
+ continue;
+ }
/* match against the line number range */
if (query->first_lineno &&
@@ -187,22 +243,23 @@ static int ddebug_change(const struct ddebug_query *query,
nfound++;
- newflags = (dp->flags & mask) | flags;
+ newflags = (dp->flags & modifiers->mask) | modifiers->flags;
if (newflags == dp->flags)
continue;
#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
- if (!(flags & _DPRINTK_FLAGS_PRINT))
+ if (!(newflags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
- } else if (flags & _DPRINTK_FLAGS_PRINT)
+ } else if (newflags & _DPRINTK_FLAGS_PRINT) {
static_branch_enable(&dp->key.dd_key_true);
+ }
#endif
+ v4pr_info("changed %s:%d [%s]%s %s => %s\n",
+ trim_prefix(dp->filename), dp->lineno,
+ dt->mod_name, dp->function,
+ ddebug_describe_flags(dp->flags, &fbuf),
+ ddebug_describe_flags(newflags, &nbuf));
dp->flags = newflags;
- vpr_info("changed %s:%d [%s]%s =%s\n",
- trim_prefix(dp->filename), dp->lineno,
- dt->mod_name, dp->function,
- ddebug_describe_flags(dp, flagbuf,
- sizeof(flagbuf)));
}
}
mutex_unlock(&ddebug_lock);
@@ -259,7 +316,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
buf = end;
}
- if (verbose) {
+ if (verbose >= 3) {
int i;
pr_info("split into words:");
for (i = 0; i < nwords; i++)
@@ -289,6 +346,41 @@ static inline int parse_lineno(const char *str, unsigned int *val)
return 0;
}
+static int parse_linerange(struct ddebug_query *query, const char *first)
+{
+ char *last = strchr(first, '-');
+
+ if (query->first_lineno || query->last_lineno) {
+ pr_err("match-spec: line used 2x\n");
+ return -EINVAL;
+ }
+ if (last)
+ *last++ = '\0';
+ if (parse_lineno(first, &query->first_lineno) < 0)
+ return -EINVAL;
+ if (last) {
+ /* range <first>-<last> */
+ if (parse_lineno(last, &query->last_lineno) < 0)
+ return -EINVAL;
+
+ /* special case for last lineno not specified */
+ if (query->last_lineno == 0)
+ query->last_lineno = UINT_MAX;
+
+ if (query->last_lineno < query->first_lineno) {
+ pr_err("last-line:%d < 1st-line:%d\n",
+ query->last_lineno,
+ query->first_lineno);
+ return -EINVAL;
+ }
+ } else {
+ query->last_lineno = query->first_lineno;
+ }
+ v3pr_info("parsed line %d-%d\n", query->first_lineno,
+ query->last_lineno);
+ return 0;
+}
+
static int check_set(const char **dest, char *src, char *name)
{
int rc = 0;
@@ -322,66 +414,63 @@ static int ddebug_parse_query(char *words[], int nwords,
{
unsigned int i;
int rc = 0;
+ char *fline;
/* check we have an even number of words */
if (nwords % 2 != 0) {
pr_err("expecting pairs of match-spec <value>\n");
return -EINVAL;
}
- memset(query, 0, sizeof(*query));
-
- if (modname)
- /* support $modname.dyndbg=<multiple queries> */
- query->module = modname;
for (i = 0; i < nwords; i += 2) {
- if (!strcmp(words[i], "func")) {
- rc = check_set(&query->function, words[i+1], "func");
- } else if (!strcmp(words[i], "file")) {
- rc = check_set(&query->filename, words[i+1], "file");
- } else if (!strcmp(words[i], "module")) {
- rc = check_set(&query->module, words[i+1], "module");
- } else if (!strcmp(words[i], "format")) {
- string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
- UNESCAPE_OCTAL |
- UNESCAPE_SPECIAL);
- rc = check_set(&query->format, words[i+1], "format");
- } else if (!strcmp(words[i], "line")) {
- char *first = words[i+1];
- char *last = strchr(first, '-');
- if (query->first_lineno || query->last_lineno) {
- pr_err("match-spec: line used 2x\n");
- return -EINVAL;
- }
- if (last)
- *last++ = '\0';
- if (parse_lineno(first, &query->first_lineno) < 0)
- return -EINVAL;
- if (last) {
- /* range <first>-<last> */
- if (parse_lineno(last, &query->last_lineno) < 0)
- return -EINVAL;
+ char *keyword = words[i];
+ char *arg = words[i+1];
- /* special case for last lineno not specified */
- if (query->last_lineno == 0)
- query->last_lineno = UINT_MAX;
+ if (!strcmp(keyword, "func")) {
+ rc = check_set(&query->function, arg, "func");
+ } else if (!strcmp(keyword, "file")) {
+ if (check_set(&query->filename, arg, "file"))
+ return -EINVAL;
- if (query->last_lineno < query->first_lineno) {
- pr_err("last-line:%d < 1st-line:%d\n",
- query->last_lineno,
- query->first_lineno);
+ /* tail :$info is function or line-range */
+ fline = strchr(query->filename, ':');
+ if (!fline)
+ continue;
+ *fline++ = '\0';
+ if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ /* take as function name */
+ if (check_set(&query->function, fline, "func"))
return -EINVAL;
- }
} else {
- query->last_lineno = query->first_lineno;
+ if (parse_linerange(query, fline))
+ return -EINVAL;
}
+ } else if (!strcmp(keyword, "module")) {
+ rc = check_set(&query->module, arg, "module");
+ } else if (!strcmp(keyword, "format")) {
+ string_unescape_inplace(arg, UNESCAPE_SPACE |
+ UNESCAPE_OCTAL |
+ UNESCAPE_SPECIAL);
+ rc = check_set(&query->format, arg, "format");
+ } else if (!strcmp(keyword, "line")) {
+ if (parse_linerange(query, arg))
+ return -EINVAL;
+ } else if (!strcmp(keyword, "class")) {
+ rc = check_set(&query->class_string, arg, "class");
} else {
- pr_err("unknown keyword \"%s\"\n", words[i]);
+ pr_err("unknown keyword \"%s\"\n", keyword);
return -EINVAL;
}
if (rc)
return rc;
}
+ if (!query->module && modname)
+ /*
+ * support $modname.dyndbg=<multiple queries>, when
+ * not given in the query itself
+ */
+ query->module = modname;
+
vpr_info_dq(query, "parsed");
return 0;
}
@@ -392,11 +481,9 @@ static int ddebug_parse_query(char *words[], int nwords,
* flags fields of matched _ddebug's. Returns 0 on success
* or <0 on error.
*/
-static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
- unsigned int *maskp)
+static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
{
- unsigned flags = 0;
- int op = '=', i;
+ int op, i;
switch (*str) {
case '+':
@@ -408,45 +495,45 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
pr_err("bad flag-op %c, at start of %s\n", *str, str);
return -EINVAL;
}
- vpr_info("op='%c'\n", op);
+ v3pr_info("op='%c'\n", op);
for (; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
if (*str == opt_array[i].opt_char) {
- flags |= opt_array[i].flag;
+ modifiers->flags |= opt_array[i].flag;
break;
}
}
if (i < 0) {
- pr_err("unknown flag '%c' in \"%s\"\n", *str, str);
+ pr_err("unknown flag '%c'\n", *str);
return -EINVAL;
}
}
- vpr_info("flags=0x%x\n", flags);
+ v3pr_info("flags=0x%x\n", modifiers->flags);
- /* calculate final *flagsp, *maskp according to mask and op */
+ /* calculate final flags, mask based upon op */
switch (op) {
case '=':
- *maskp = 0;
- *flagsp = flags;
+ /* modifiers->flags already set */
+ modifiers->mask = 0;
break;
case '+':
- *maskp = ~0U;
- *flagsp = flags;
+ modifiers->mask = ~0U;
break;
case '-':
- *maskp = ~flags;
- *flagsp = 0;
+ modifiers->mask = ~modifiers->flags;
+ modifiers->flags = 0;
break;
}
- vpr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
+ v3pr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
+
return 0;
}
static int ddebug_exec_query(char *query_string, const char *modname)
{
- unsigned int flags = 0, mask = 0;
- struct ddebug_query query;
+ struct flag_settings modifiers = {};
+ struct ddebug_query query = {};
#define MAXWORDS 9
int nwords, nfound;
char *words[MAXWORDS];
@@ -457,7 +544,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
return -EINVAL;
}
/* check flags 1st (last arg) so query is pairs of spec,val */
- if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) {
+ if (ddebug_parse_flags(words[nwords-1], &modifiers)) {
pr_err("flags parse failed\n");
return -EINVAL;
}
@@ -466,7 +553,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
return -EINVAL;
}
/* actually go and implement the change */
- nfound = ddebug_change(&query, flags, mask);
+ nfound = ddebug_change(&query, &modifiers);
vpr_info_dq(&query, nfound ? "applied" : "no-match");
return nfound;
@@ -490,7 +577,7 @@ static int ddebug_exec_queries(char *query, const char *modname)
if (!query || !*query || *query == '#')
continue;
- vpr_info("query %d: \"%s\"\n", i, query);
+ vpr_info("query %d: \"%s\" mod:%s\n", i, query, modname ?: "*");
rc = ddebug_exec_query(query, modname);
if (rc < 0) {
@@ -501,15 +588,228 @@ static int ddebug_exec_queries(char *query, const char *modname)
}
i++;
}
- vpr_info("processed %d queries, with %d matches, %d errs\n",
- i, nfound, errs);
+ if (i)
+ v2pr_info("processed %d queries, with %d matches, %d errs\n",
+ i, nfound, errs);
if (exitcode)
return exitcode;
return nfound;
}
-#define PREFIX_SIZE 64
+/* apply a new bitmap to the sys-knob's current bit-state */
+static int ddebug_apply_class_bitmap(const struct ddebug_class_param *dcp,
+ unsigned long *new_bits, unsigned long *old_bits)
+{
+#define QUERY_SIZE 128
+ char query[QUERY_SIZE];
+ const struct ddebug_class_map *map = dcp->map;
+ int matches = 0;
+ int bi, ct;
+
+ v2pr_info("apply: 0x%lx to: 0x%lx\n", *new_bits, *old_bits);
+
+ for (bi = 0; bi < map->length; bi++) {
+ if (test_bit(bi, new_bits) == test_bit(bi, old_bits))
+ continue;
+
+ snprintf(query, QUERY_SIZE, "class %s %c%s", map->class_names[bi],
+ test_bit(bi, new_bits) ? '+' : '-', dcp->flags);
+
+ ct = ddebug_exec_queries(query, NULL);
+ matches += ct;
+
+ v2pr_info("bit_%d: %d matches on class: %s -> 0x%lx\n", bi,
+ ct, map->class_names[bi], *new_bits);
+ }
+ return matches;
+}
+
+/* stub to later conditionally add "$module." prefix where not already done */
+#define KP_NAME(kp) kp->name
+
+#define CLASSMAP_BITMASK(width) ((1UL << (width)) - 1)
+
+/* accept comma-separated-list of [+-] classnames */
+static int param_set_dyndbg_classnames(const char *instr, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long curr_bits, old_bits;
+ char *cl_str, *p, *tmp;
+ int cls_id, totct = 0;
+ bool wanted;
+
+ cl_str = tmp = kstrdup(instr, GFP_KERNEL);
+ p = strchr(cl_str, '\n');
+ if (p)
+ *p = '\0';
+
+ /* start with previously set state-bits, then modify */
+ curr_bits = old_bits = *dcp->bits;
+ vpr_info("\"%s\" > %s:0x%lx\n", cl_str, KP_NAME(kp), curr_bits);
+
+ for (; cl_str; cl_str = p) {
+ p = strchr(cl_str, ',');
+ if (p)
+ *p++ = '\0';
+
+ if (*cl_str == '-') {
+ wanted = false;
+ cl_str++;
+ } else {
+ wanted = true;
+ if (*cl_str == '+')
+ cl_str++;
+ }
+ cls_id = match_string(map->class_names, map->length, cl_str);
+ if (cls_id < 0) {
+ pr_err("%s unknown to %s\n", cl_str, KP_NAME(kp));
+ continue;
+ }
+
+ /* have one or more valid class_ids of one *_NAMES type */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ /* the +/- pertains to a single bit */
+ if (test_bit(cls_id, &curr_bits) == wanted) {
+ v3pr_info("no change on %s\n", cl_str);
+ continue;
+ }
+ curr_bits ^= BIT(cls_id);
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, dcp->bits);
+ *dcp->bits = curr_bits;
+ v2pr_info("%s: changed bit %d:%s\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id]);
+ break;
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* cls_id = N in 0..max. wanted +/- determines N or N-1 */
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ curr_bits = CLASSMAP_BITMASK(cls_id + (wanted ? 1 : 0 ));
+
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, &old_bits);
+ *dcp->lvl = (cls_id + (wanted ? 1 : 0));
+ v2pr_info("%s: changed bit-%d: \"%s\" %lx->%lx\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id], old_bits, curr_bits);
+ break;
+ default:
+ pr_err("illegal map-type value %d\n", map->map_type);
+ }
+ }
+ kfree(tmp);
+ vpr_info("total matches: %d\n", totct);
+ return 0;
+}
+
+/**
+ * param_set_dyndbg_classes - class FOO >control
+ * @instr: string echoed to sysfs; input format depends on map_type
+ * @kp: kp->arg has state: bits/lvl, map, map_type
+ *
+ * Enable/disable prdbgs by their class, as given in the arguments to
+ * DECLARE_DYNDBG_CLASSMAP. For LEVEL map-types, enforce relative
+ * levels by bitpos.
+ *
+ * Returns: 0 or <0 if error.
+ */
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long inrep, new_bits, old_bits;
+ int rc, totct = 0;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* handle [+-]classnames list separately, we are done here */
+ return param_set_dyndbg_classnames(instr, kp);
+
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* numeric input, accept and fall-thru */
+ rc = kstrtoul(instr, 0, &inrep);
+ if (rc) {
+ pr_err("expecting numeric input: %s > %s\n", instr, KP_NAME(kp));
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
+ return -EINVAL;
+ }
+
+ /* only _BITS,_NUM (numeric) map-types get here */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ /* expect bits. mask and warn if too many */
+ if (inrep & ~CLASSMAP_BITMASK(map->length)) {
+ pr_warn("%s: input: 0x%lx exceeds mask: 0x%lx, masking\n",
+ KP_NAME(kp), inrep, CLASSMAP_BITMASK(map->length));
+ inrep &= CLASSMAP_BITMASK(map->length);
+ }
+ v2pr_info("bits:%lx > %s\n", inrep, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &inrep, dcp->bits);
+ *dcp->bits = inrep;
+ break;
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* input is bitpos, of highest verbosity to be enabled */
+ if (inrep > map->length) {
+ pr_warn("%s: level:%ld exceeds max:%d, clamping\n",
+ KP_NAME(kp), inrep, map->length);
+ inrep = map->length;
+ }
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ new_bits = CLASSMAP_BITMASK(inrep);
+ v2pr_info("lvl:%ld bits:0x%lx > %s\n", inrep, new_bits, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &new_bits, &old_bits);
+ *dcp->lvl = inrep;
+ break;
+ default:
+ pr_warn("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
+ }
+ vpr_info("%s: total matches: %d\n", KP_NAME(kp), totct);
+ return 0;
+}
+EXPORT_SYMBOL(param_set_dyndbg_classes);
+
+/**
+ * param_get_dyndbg_classes - classes reader
+ * @buffer: string description of controlled bits -> classes
+ * @kp: kp->arg has state: bits, map
+ *
+ * Reads the last written state; the underlying prdbg state may have
+ * been altered by direct writes to >control. Displays hex (0x...) for
+ * DISJOINT, 0-N for LEVEL map-types. Returns: #chars written or <0 on error.
+ */
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ return scnprintf(buffer, PAGE_SIZE, "0x%lx\n", *dcp->bits);
+
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", *dcp->lvl);
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL(param_get_dyndbg_classes);
+
+const struct kernel_param_ops param_ops_dyndbg_classes = {
+ .set = param_set_dyndbg_classes,
+ .get = param_get_dyndbg_classes,
+};
+EXPORT_SYMBOL(param_ops_dyndbg_classes);
+
+#define PREFIX_SIZE 128
static int remaining(int wrote)
{
@@ -518,13 +818,11 @@ static int remaining(int wrote)
return 0;
}
-static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
{
int pos_after_tid;
int pos = 0;
- *buf = '\0';
-
if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
if (in_interrupt())
pos += snprintf(buf + pos, remaining(pos), "<intr> ");
@@ -539,6 +837,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
pos += snprintf(buf + pos, remaining(pos), "%s:",
desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_SOURCENAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ trim_prefix(desc->filename));
if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
pos += snprintf(buf + pos, remaining(pos), "%d:",
desc->lineno);
@@ -550,11 +851,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+ if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+ return __dynamic_emit_prefix(desc, buf);
+ return buf;
+}
+
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
struct va_format vaf;
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -587,7 +895,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
if (!dev) {
printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
dynamic_emit_prefix(descriptor, buf),
@@ -616,7 +924,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (dev && dev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
"%s%s %s %s%s: %pV",
@@ -652,7 +960,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (ibdev && ibdev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
"%s%s %s %s: %pV",
@@ -673,20 +981,17 @@ EXPORT_SYMBOL(__dynamic_ibdev_dbg);
#endif
-#define DDEBUG_STRING_SIZE 1024
-static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE];
-
-static __init int ddebug_setup_query(char *str)
+/*
+ * Install a noop handler to make dyndbg look like a normal kernel cli param.
+ * This avoids warnings about dyndbg being an unknown cli param when supplied
+ * by a user.
+ */
+static __init int dyndbg_setup(char *str)
{
- if (strlen(str) >= DDEBUG_STRING_SIZE) {
- pr_warn("ddebug boot param string too large\n");
- return 0;
- }
- strlcpy(ddebug_setup_string, str, DDEBUG_STRING_SIZE);
return 1;
}
-__setup("ddebug_query=", ddebug_setup_query);
+__setup("dyndbg=", dyndbg_setup);
/*
* File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
@@ -708,7 +1013,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
tmpbuf = memdup_user_nul(ubuf, len);
if (IS_ERR(tmpbuf))
return PTR_ERR(tmpbuf);
- vpr_info("read %d bytes from userspace\n", (int)len);
+ v2pr_info("read %zu bytes from userspace\n", len);
ret = ddebug_exec_queries(tmpbuf, NULL);
kfree(tmpbuf);
@@ -728,13 +1033,12 @@ static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
{
if (list_empty(&ddebug_tables)) {
iter->table = NULL;
- iter->idx = 0;
return NULL;
}
iter->table = list_entry(ddebug_tables.next,
struct ddebug_table, link);
- iter->idx = 0;
- return &iter->table->ddebugs[iter->idx];
+ iter->idx = iter->table->num_ddebugs;
+ return &iter->table->ddebugs[--iter->idx];
}
/*
@@ -747,15 +1051,16 @@ static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
{
if (iter->table == NULL)
return NULL;
- if (++iter->idx == iter->table->num_ddebugs) {
+ if (--iter->idx < 0) {
/* iterate to next table */
- iter->idx = 0;
if (list_is_last(&iter->table->link, &ddebug_tables)) {
iter->table = NULL;
return NULL;
}
iter->table = list_entry(iter->table->link.next,
struct ddebug_table, link);
+ iter->idx = iter->table->num_ddebugs;
+ --iter->idx;
}
return &iter->table->ddebugs[iter->idx];
}
@@ -771,8 +1076,6 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
struct _ddebug *dp;
int n = *pos;
- vpr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
-
mutex_lock(&ddebug_lock);
if (!n)
@@ -795,9 +1098,6 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
struct ddebug_iter *iter = m->private;
struct _ddebug *dp;
- vpr_info("called m=%p p=%p *pos=%lld\n",
- m, p, (unsigned long long)*pos);
-
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
else
@@ -806,6 +1106,20 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
return dp;
}
+#define class_in_range(class_id, map) \
+ (class_id >= map->base && class_id < map->base + map->length)
+
+static const char *ddebug_class_name(struct ddebug_iter *iter, struct _ddebug *dp)
+{
+ struct ddebug_class_map *map;
+
+ list_for_each_entry(map, &iter->table->maps, link)
+ if (class_in_range(dp->class_id, map))
+ return map->class_names[dp->class_id - map->base];
+
+ return NULL;
+}
+
/*
* Seq_ops show method. Called several times within a read()
* call from userspace, with ddebug_lock held. Formats the
@@ -816,9 +1130,8 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
{
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
- char flagsbuf[10];
-
- vpr_info("called m=%p p=%p\n", m, p);
+ struct flagsbuf flags;
+ char const *class;
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -829,9 +1142,18 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
seq_printf(m, "%s:%u [%s]%s =%s \"",
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
- seq_escape(m, dp->format, "\t\r\n\"");
- seq_puts(m, "\"\n");
+ ddebug_describe_flags(dp->flags, &flags));
+ seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
+ seq_puts(m, "\"");
+
+ if (dp->class_id != _DPRINTK_CLASS_DFLT) {
+ class = ddebug_class_name(iter, dp);
+ if (class)
+ seq_printf(m, " class:%s", class);
+ else
+ seq_printf(m, " class unknown, _id:%d", dp->class_id);
+ }
+ seq_puts(m, "\n");
return 0;
}
@@ -842,7 +1164,6 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
*/
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
- vpr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@ -853,16 +1174,8 @@ static const struct seq_operations ddebug_proc_seqops = {
.stop = ddebug_proc_stop
};
-/*
- * File_ops->open method for <debugfs>/dynamic_debug/control. Does
- * the seq_file setup dance, and also creates an iterator to walk the
- * _ddebugs. Note that we create a seq_file always, even for O_WRONLY
- * files where it's not needed, as doing so simplifies the ->release
- * method.
- */
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
- vpr_info("called\n");
return seq_open_private(file, &ddebug_proc_seqops,
sizeof(struct ddebug_iter));
}
@@ -876,18 +1189,57 @@ static const struct file_operations ddebug_proc_fops = {
.write = ddebug_proc_write
};
+static const struct proc_ops proc_fops = {
+ .proc_open = ddebug_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = ddebug_proc_write
+};
+
+static void ddebug_attach_module_classes(struct ddebug_table *dt,
+ struct ddebug_class_map *classes,
+ int num_classes)
+{
+ struct ddebug_class_map *cm;
+ int i, j, ct = 0;
+
+ for (cm = classes, i = 0; i < num_classes; i++, cm++) {
+
+ if (!strcmp(cm->mod_name, dt->mod_name)) {
+
+ v2pr_info("class[%d]: module:%s base:%d len:%d ty:%d\n", i,
+ cm->mod_name, cm->base, cm->length, cm->map_type);
+
+ for (j = 0; j < cm->length; j++)
+ v3pr_info(" %d: %d %s\n", j + cm->base, j,
+ cm->class_names[j]);
+
+ list_add(&cm->link, &dt->maps);
+ ct++;
+ }
+ }
+ if (ct)
+ vpr_info("module:%s attached %d classes\n", dt->mod_name, ct);
+}
+
/*
* Allocate a new ddebug_table for the given module
* and add it to the global list.
*/
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *name)
+static int ddebug_add_module(struct _ddebug_info *di, const char *modname)
{
struct ddebug_table *dt;
+ v3pr_info("add-module: %s.%d sites\n", modname, di->num_descs);
+ if (!di->num_descs) {
+ v3pr_info(" skip %s\n", modname);
+ return 0;
+ }
+
dt = kzalloc(sizeof(*dt), GFP_KERNEL);
if (dt == NULL) {
- pr_err("error adding module: %s\n", name);
+ pr_err("error adding module: %s\n", modname);
return -ENOMEM;
}
/*
@@ -896,15 +1248,21 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
* member of struct module, which lives at least as long as
* this struct ddebug_table.
*/
- dt->mod_name = name;
- dt->num_ddebugs = n;
- dt->ddebugs = tab;
+ dt->mod_name = modname;
+ dt->ddebugs = di->descs;
+ dt->num_ddebugs = di->num_descs;
+
+ INIT_LIST_HEAD(&dt->link);
+ INIT_LIST_HEAD(&dt->maps);
+
+ if (di->classes && di->num_classes)
+ ddebug_attach_module_classes(dt, di->classes, di->num_classes);
mutex_lock(&ddebug_lock);
list_add_tail(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- vpr_info("%u debug prints in module %s\n", n, dt->mod_name);
+ vpr_info("%3u debug prints in module %s\n", di->num_descs, modname);
return 0;
}
@@ -926,7 +1284,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val,
ddebug_exec_queries((val ? val : "+p"), modname);
- return 0; /* query failure shouldnt stop module load */
+ return 0; /* query failure shouldn't stop module load */
}
/* handle both dyndbg and $module.dyndbg params at boot */
@@ -954,17 +1312,17 @@ static void ddebug_table_free(struct ddebug_table *dt)
kfree(dt);
}
+#ifdef CONFIG_MODULES
+
/*
* Called in response to a module being unloaded. Removes
* any ddebug_table's which point at the module.
*/
-int ddebug_remove_module(const char *mod_name)
+static int ddebug_remove_module(const char *mod_name)
{
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
- vpr_info("removing module \"%s\"\n", mod_name);
-
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
if (dt->mod_name == mod_name) {
@@ -974,9 +1332,38 @@ int ddebug_remove_module(const char *mod_name)
}
}
mutex_unlock(&ddebug_lock);
+ if (!ret)
+ v2pr_info("removed module \"%s\"\n", mod_name);
return ret;
}
+static int ddebug_module_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+ int ret = 0;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ ret = ddebug_add_module(&mod->dyndbg_info, mod->name);
+ if (ret)
+ WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n");
+ break;
+ case MODULE_STATE_GOING:
+ ddebug_remove_module(mod->name);
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block ddebug_module_nb = {
+ .notifier_call = ddebug_module_notify,
+ .priority = 0, /* dynamic debug depends on jump label */
+};
+
+#endif /* CONFIG_MODULES */
+
static void ddebug_remove_all_tables(void)
{
mutex_lock(&ddebug_lock);
@@ -991,76 +1378,94 @@ static void ddebug_remove_all_tables(void)
static __initdata int ddebug_init_success;
-static int __init dynamic_debug_init_debugfs(void)
+static int __init dynamic_debug_init_control(void)
{
- struct dentry *dir, *file;
+ struct proc_dir_entry *procfs_dir;
+ struct dentry *debugfs_dir;
if (!ddebug_init_success)
return -ENODEV;
- dir = debugfs_create_dir("dynamic_debug", NULL);
- if (!dir)
- return -ENOMEM;
- file = debugfs_create_file("control", 0644, dir, NULL,
- &ddebug_proc_fops);
- if (!file) {
- debugfs_remove(dir);
- return -ENOMEM;
+ /* Create the control file in debugfs if it is enabled */
+ if (debugfs_initialized()) {
+ debugfs_dir = debugfs_create_dir("dynamic_debug", NULL);
+ debugfs_create_file("control", 0644, debugfs_dir, NULL,
+ &ddebug_proc_fops);
}
+
+ /* Also create the control file in procfs */
+ procfs_dir = proc_mkdir("dynamic_debug", NULL);
+ if (procfs_dir)
+ proc_create("control", 0644, procfs_dir, &proc_fops);
+
return 0;
}
static int __init dynamic_debug_init(void)
{
- struct _ddebug *iter, *iter_start;
- const char *modname = NULL;
+ struct _ddebug *iter, *iter_mod_start;
+ int ret, i, mod_sites, mod_ct;
+ const char *modname;
char *cmdline;
- int ret = 0;
- int n = 0, entries = 0, modct = 0;
- int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
- pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
- return 1;
+ struct _ddebug_info di = {
+ .descs = __start___dyndbg,
+ .classes = __start___dyndbg_classes,
+ .num_descs = __stop___dyndbg - __start___dyndbg,
+ .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes,
+ };
+
+#ifdef CONFIG_MODULES
+ ret = register_module_notifier(&ddebug_module_nb);
+ if (ret) {
+ pr_warn("Failed to register dynamic debug module notifier\n");
+ return ret;
+ }
+#endif /* CONFIG_MODULES */
+
+ if (&__start___dyndbg == &__stop___dyndbg) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
+ pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
+ return 1;
+ }
+ pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n");
+ ddebug_init_success = 1;
+ return 0;
}
- iter = __start___verbose;
+
+ iter = iter_mod_start = __start___dyndbg;
modname = iter->modname;
- iter_start = iter;
- for (; iter < __stop___verbose; iter++) {
- entries++;
- verbose_bytes += strlen(iter->modname) + strlen(iter->function)
- + strlen(iter->filename) + strlen(iter->format);
+ i = mod_sites = mod_ct = 0;
+
+ for (; iter < __stop___dyndbg; iter++, i++, mod_sites++) {
if (strcmp(modname, iter->modname)) {
- modct++;
- ret = ddebug_add_module(iter_start, n, modname);
+ mod_ct++;
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = ddebug_add_module(&di, modname);
if (ret)
goto out_err;
- n = 0;
+
+ mod_sites = 0;
modname = iter->modname;
- iter_start = iter;
+ iter_mod_start = iter;
}
- n++;
}
- ret = ddebug_add_module(iter_start, n, modname);
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = ddebug_add_module(&di, modname);
if (ret)
goto out_err;
ddebug_init_success = 1;
- vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in (readonly) verbose section\n",
- modct, entries, (int)(modct * sizeof(struct ddebug_table)),
- verbose_bytes + (int)(__stop___verbose - __start___verbose));
-
- /* apply ddebug_query boot param, dont unload tables on err */
- if (ddebug_setup_string[0] != '\0') {
- pr_warn("ddebug_query param name is deprecated, change it to dyndbg\n");
- ret = ddebug_exec_queries(ddebug_setup_string, NULL);
- if (ret < 0)
- pr_warn("Invalid ddebug boot param %s\n",
- ddebug_setup_string);
- else
- pr_info("%d changes by ddebug_query\n", ret);
- }
+ vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n",
+ i, mod_ct, (int)((mod_ct * sizeof(struct ddebug_table)) >> 10),
+ (int)((i * sizeof(struct _ddebug)) >> 10));
+
+ if (di.num_classes)
+ v2pr_info(" %d builtin ddebug class-maps\n", di.num_classes);
+
/* now that ddebug tables are loaded, process all boot args
* again to find and activate queries given in dyndbg params.
* While this has already been done for known boot params, it
@@ -1083,4 +1488,4 @@ out_err:
early_initcall(dynamic_debug_init);
/* Debugfs setup must be done later */
-fs_initcall(dynamic_debug_init_debugfs);
+fs_initcall(dynamic_debug_init_control);
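A note on how the new class-map machinery is meant to be consumed: a client module declares a classmap with DECLARE_DYNDBG_CLASSMAP() and exposes it through param_ops_dyndbg_classes. Below is a minimal sketch modeled on the drm usage; the my_* names are illustrative and not part of this patch.

/* sketch: expose a DISJOINT_BITS classmap as a module parameter */
unsigned long my_debug_bits;	/* bit-state consumed by the prdbg callsites */

DECLARE_DYNDBG_CLASSMAP(my_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"MY_UT_CORE", "MY_UT_DRIVER", "MY_UT_KMS");

static struct ddebug_class_param my_debug_param = {
	.bits  = &my_debug_bits,
	.flags = "p",			/* flag toggled when a class bit changes */
	.map   = &my_debug_classes,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &my_debug_param, 0600);

Writing 0x3 to that parameter would flip the two low class bits, and ddebug_apply_class_bitmap() above would issue "class MY_UT_CORE +p" and "class MY_UT_DRIVER +p" queries on the caller's behalf.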
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e659a027036e..fde0aa244148 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -60,8 +60,8 @@ void dql_completed(struct dql *dql, unsigned int count)
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
- * If there is slack, the amount of execess data queued above
- * the the amount needed to prevent starvation, the queue limit
+ * If there is slack, the amount of excess data queued above
+ * the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index c001e084829e..d2c37d64fd0c 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -40,16 +40,16 @@ enum cpio_fields {
};
/**
- * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
- * @data: Pointer to the the cpio archive or a header inside
+ * @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
* @nextoff: When a matching file is found, this is the offset from the
* beginning of the cpio to the beginning of the next file, not the
* matching file itself. It can be used to iterate through the cpio
* to find all files inside of a directory path.
*
- * @return: struct cpio_data containing the address, length and
+ * Return: &struct cpio_data containing the address, length and
* filename (with the directory path cut off) of the found file.
* If you search for a filename and not for files in a directory,
* pass the absolute path of the filename in the cpio and make sure
@@ -126,7 +126,7 @@ struct cpio_data find_cpio_data(const char *path, void *data,
"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
p, MAX_CPIO_FILE_NAME);
}
- strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
+ strscpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
cd.data = (void *)dptr;
cd.size = ch[C_FILESIZE];
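For orientation, a hedged usage sketch of find_cpio_data(); the path, the initrd/initrd_len variables, and the load_blob() helper are illustrative rather than taken from this patch.

/* sketch: find one file under a directory path in an uncompressed initrd */
struct cpio_data cd;
long offset = 0;

cd = find_cpio_data("kernel/x86/microcode/", initrd, initrd_len, &offset);
if (cd.data)				/* cd.name is the file name minus the path */
	load_blob(cd.data, cd.size);	/* illustrative consumer of the found file */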
diff --git a/lib/errname.c b/lib/errname.c
new file mode 100644
index 000000000000..4f9112b38f3a
--- /dev/null
+++ b/lib/errname.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/build_bug.h>
+#include <linux/errno.h>
+#include <linux/errname.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+
+/*
+ * Ensure these tables do not accidentally become gigantic if some
+ * huge errno makes it in. On most architectures, the first table will
+ * only have about 140 entries, but mips and parisc have more sparsely
+ * allocated errnos (with EHWPOISON = 257 on parisc, and EDQUOT = 1133
+ * on mips), so this wastes a bit of space on those - though we
+ * special-case EDQUOT below.
+ */
+#define E(err) [err + BUILD_BUG_ON_ZERO(err <= 0 || err > 300)] = "-" #err
+static const char *names_0[] = {
+ E(E2BIG),
+ E(EACCES),
+ E(EADDRINUSE),
+ E(EADDRNOTAVAIL),
+ E(EADV),
+ E(EAFNOSUPPORT),
+ E(EAGAIN), /* EWOULDBLOCK */
+ E(EALREADY),
+ E(EBADE),
+ E(EBADF),
+ E(EBADFD),
+ E(EBADMSG),
+ E(EBADR),
+ E(EBADRQC),
+ E(EBADSLT),
+ E(EBFONT),
+ E(EBUSY),
+ E(ECANCELED), /* ECANCELLED */
+ E(ECHILD),
+ E(ECHRNG),
+ E(ECOMM),
+ E(ECONNABORTED),
+ E(ECONNREFUSED), /* EREFUSED */
+ E(ECONNRESET),
+ E(EDEADLK), /* EDEADLOCK */
+#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
+ E(EDEADLOCK),
+#endif
+ E(EDESTADDRREQ),
+ E(EDOM),
+ E(EDOTDOT),
+#ifndef CONFIG_MIPS
+ E(EDQUOT),
+#endif
+ E(EEXIST),
+ E(EFAULT),
+ E(EFBIG),
+ E(EHOSTDOWN),
+ E(EHOSTUNREACH),
+ E(EHWPOISON),
+ E(EIDRM),
+ E(EILSEQ),
+#ifdef EINIT
+ E(EINIT),
+#endif
+ E(EINPROGRESS),
+ E(EINTR),
+ E(EINVAL),
+ E(EIO),
+ E(EISCONN),
+ E(EISDIR),
+ E(EISNAM),
+ E(EKEYEXPIRED),
+ E(EKEYREJECTED),
+ E(EKEYREVOKED),
+ E(EL2HLT),
+ E(EL2NSYNC),
+ E(EL3HLT),
+ E(EL3RST),
+ E(ELIBACC),
+ E(ELIBBAD),
+ E(ELIBEXEC),
+ E(ELIBMAX),
+ E(ELIBSCN),
+ E(ELNRNG),
+ E(ELOOP),
+ E(EMEDIUMTYPE),
+ E(EMFILE),
+ E(EMLINK),
+ E(EMSGSIZE),
+ E(EMULTIHOP),
+ E(ENAMETOOLONG),
+ E(ENAVAIL),
+ E(ENETDOWN),
+ E(ENETRESET),
+ E(ENETUNREACH),
+ E(ENFILE),
+ E(ENOANO),
+ E(ENOBUFS),
+ E(ENOCSI),
+ E(ENODATA),
+ E(ENODEV),
+ E(ENOENT),
+ E(ENOEXEC),
+ E(ENOKEY),
+ E(ENOLCK),
+ E(ENOLINK),
+ E(ENOMEDIUM),
+ E(ENOMEM),
+ E(ENOMSG),
+ E(ENONET),
+ E(ENOPKG),
+ E(ENOPROTOOPT),
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+ E(ENOTDIR),
+ E(ENOTEMPTY),
+ E(ENOTNAM),
+ E(ENOTRECOVERABLE),
+ E(ENOTSOCK),
+ E(ENOTTY),
+ E(ENOTUNIQ),
+ E(ENXIO),
+ E(EOPNOTSUPP),
+ E(EOVERFLOW),
+ E(EOWNERDEAD),
+ E(EPERM),
+ E(EPFNOSUPPORT),
+ E(EPIPE),
+#ifdef EPROCLIM
+ E(EPROCLIM),
+#endif
+ E(EPROTO),
+ E(EPROTONOSUPPORT),
+ E(EPROTOTYPE),
+ E(ERANGE),
+ E(EREMCHG),
+#ifdef EREMDEV
+ E(EREMDEV),
+#endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+#ifdef ERREMOTE
+ E(ERREMOTE),
+#endif
+ E(ESHUTDOWN),
+ E(ESOCKTNOSUPPORT),
+ E(ESPIPE),
+ E(ESRCH),
+ E(ESRMNT),
+ E(ESTALE),
+ E(ESTRPIPE),
+ E(ETIME),
+ E(ETIMEDOUT),
+ E(ETOOMANYREFS),
+ E(ETXTBSY),
+ E(EUCLEAN),
+ E(EUNATCH),
+ E(EUSERS),
+ E(EXDEV),
+ E(EXFULL),
+};
+#undef E
+
+#ifdef EREFUSED /* parisc */
+static_assert(EREFUSED == ECONNREFUSED);
+#endif
+#ifdef ECANCELLED /* parisc */
+static_assert(ECANCELLED == ECANCELED);
+#endif
+static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
+
+#define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
+static const char *names_512[] = {
+ E(ERESTARTSYS),
+ E(ERESTARTNOINTR),
+ E(ERESTARTNOHAND),
+ E(ENOIOCTLCMD),
+ E(ERESTART_RESTARTBLOCK),
+ E(EPROBE_DEFER),
+ E(EOPENSTALE),
+ E(ENOPARAM),
+
+ E(EBADHANDLE),
+ E(ENOTSYNC),
+ E(EBADCOOKIE),
+ E(ENOTSUPP),
+ E(ETOOSMALL),
+ E(ESERVERFAULT),
+ E(EBADTYPE),
+ E(EJUKEBOX),
+ E(EIOCBQUEUED),
+ E(ERECALLCONFLICT),
+};
+#undef E
+
+static const char *__errname(unsigned err)
+{
+ if (err < ARRAY_SIZE(names_0))
+ return names_0[err];
+ if (err >= 512 && err - 512 < ARRAY_SIZE(names_512))
+ return names_512[err - 512];
+ /* But why? */
+ if (IS_ENABLED(CONFIG_MIPS) && err == EDQUOT) /* 1133 */
+ return "-EDQUOT";
+ return NULL;
+}
+
+/*
+ * errname(EIO) -> "EIO"
+ * errname(-EIO) -> "-EIO"
+ */
+const char *errname(int err)
+{
+ const char *name = __errname(abs(err));
+ if (!name)
+ return NULL;
+
+ return err > 0 ? name + 1 : name;
+}
+EXPORT_SYMBOL(errname);
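This table is what the %pe printk extension draws on when CONFIG_SYMBOLIC_ERRNAME is enabled; a short usage sketch (the messages are illustrative):

int err = -ENOMEM;

pr_warn("setup failed: %pe\n", ERR_PTR(err));	/* prints "setup failed: -ENOMEM" */
pr_debug("name: %s\n", errname(err));		/* "-ENOMEM", or NULL for unknown errnos */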
diff --git a/lib/error-inject.c b/lib/error-inject.c
index aa63751c916f..887acd9a6ea6 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <asm/sections.h>
/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
@@ -39,12 +40,18 @@ bool within_error_injection_list(unsigned long addr)
int get_injectable_error_type(unsigned long addr)
{
struct ei_entry *ent;
+ int ei_type = -EINVAL;
+ mutex_lock(&ei_mutex);
list_for_each_entry(ent, &error_injection_list, list) {
- if (addr >= ent->start_addr && addr < ent->end_addr)
- return ent->etype;
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ei_type = ent->etype;
+ break;
+ }
}
- return EI_ETYPE_NONE;
+ mutex_unlock(&ei_mutex);
+
+ return ei_type;
}
/*
@@ -64,7 +71,7 @@ static void populate_error_injection_list(struct error_injection_entry *start,
mutex_lock(&ei_mutex);
for (iter = start; iter < end; iter++) {
- entry = arch_deref_entry_point((void *)iter->addr);
+ entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);
if (!kernel_text_address(entry) ||
!kallsyms_lookup_size_offset(entry, &size, &offset)) {
@@ -180,6 +187,8 @@ static const char *error_type_string(int etype)
return "ERRNO";
case EI_ETYPE_ERRNO_NULL:
return "ERRNO_NULL";
+ case EI_ETYPE_TRUE:
+ return "TRUE";
default:
return "(unknown)";
}
@@ -194,34 +203,22 @@ static int ei_seq_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations ei_seq_ops = {
+static const struct seq_operations ei_sops = {
.start = ei_seq_start,
.next = ei_seq_next,
.stop = ei_seq_stop,
.show = ei_seq_show,
};
-static int ei_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &ei_seq_ops);
-}
-
-static const struct file_operations debugfs_ei_ops = {
- .open = ei_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(ei);
static int __init ei_debugfs_init(void)
{
struct dentry *dir, *file;
dir = debugfs_create_dir("error_injection", NULL);
- if (!dir)
- return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+ file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
if (!file) {
debugfs_remove(dir);
return -ENOMEM;
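For context, functions land on error_injection_list by opting in with ALLOW_ERROR_INJECTION() from <linux/error-injection.h>; a minimal sketch (the function itself is illustrative):

/* sketch: allow bpf-based error injection to override this function's return */
int my_probe_helper(struct device *dev)
{
	return 0;
}
ALLOW_ERROR_INJECTION(my_probe_helper, ERRNO);	/* listed with type "ERRNO" in debugfs */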
diff --git a/lib/errseq.c b/lib/errseq.c
index 81f9e33aa7e7..93e9b94358dc 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -3,6 +3,7 @@
#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/errseq.h>
+#include <linux/log2.h>
/*
* An errseq_t is a way of recording errors in one place, and allowing any
diff --git a/lib/extable.c b/lib/extable.c
index 25da4071122a..9c9f40bd2b3d 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define ex_to_insn(x) ((x)->insn)
@@ -20,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
}
#endif
-#ifndef ARCH_HAS_SORT_EXTABLE
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex NULL
#else
@@ -87,9 +87,6 @@ void trim_init_extable(struct module *m)
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
static int cmp_ex_search(const void *key, const void *elt)
{
@@ -119,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
return bsearch(&value, base, num,
sizeof(struct exception_table_entry), cmp_ex_search);
}
-#endif
diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c
new file mode 100644
index 000000000000..77558b6c29ca
--- /dev/null
+++ b/lib/fault-inject-usercopy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fault-inject.h>
+#include <linux/fault-inject-usercopy.h>
+
+static struct {
+ struct fault_attr attr;
+} fail_usercopy = {
+ .attr = FAULT_ATTR_INITIALIZER,
+};
+
+static int __init setup_fail_usercopy(char *str)
+{
+ return setup_fault_attr(&fail_usercopy.attr, str);
+}
+__setup("fail_usercopy=", setup_fail_usercopy);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_usercopy_debugfs(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_usercopy", NULL,
+ &fail_usercopy.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ return 0;
+}
+
+late_initcall(fail_usercopy_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+bool should_fail_usercopy(void)
+{
+ return should_fail(&fail_usercopy.attr, 1);
+}
+EXPORT_SYMBOL_GPL(should_fail_usercopy);
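The hook is intended to be consulted from the user-copy paths; a hedged sketch of a call site follows (the wrapper is illustrative, the real callers live in the usercopy/iov_iter code):

static unsigned long my_copy_from_user(void *to, const void __user *from,
				       unsigned long n)
{
	if (should_fail_usercopy())
		return n;	/* report the whole copy as failed, like the real paths */
	return raw_copy_from_user(to, from, n);
}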
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..d608f9b48c10 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -71,7 +71,7 @@ static bool fail_stacktrace(struct fault_attr *attr)
int n, nr_entries;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
- if (depth == 0)
+ if (depth == 0 || (found && !attr->reject_start && !attr->reject_end))
return found;
nr_entries = stack_trace_save(entries, depth, 1);
@@ -100,13 +100,21 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
* http://www.nongnu.org/failmalloc/
*/
-bool should_fail(struct fault_attr *attr, ssize_t size)
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
+ bool stack_checked = false;
+
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
if (fail_nth) {
- if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ if (!fail_stacktrace(attr))
+ return false;
+
+ stack_checked = true;
+ fail_nth--;
+ WRITE_ONCE(current->fail_nth, fail_nth);
+ if (!fail_nth)
goto fail;
return false;
@@ -123,6 +131,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
if (atomic_read(&attr->times) == 0)
return false;
+ if (!stack_checked && !fail_stacktrace(attr))
+ return false;
+
if (atomic_read(&attr->space) > size) {
atomic_sub(size, &attr->space);
return false;
@@ -134,20 +145,23 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
}
- if (attr->probability <= prandom_u32() % 100)
- return false;
-
- if (!fail_stacktrace(attr))
+ if (attr->probability <= get_random_u32_below(100))
return false;
fail:
- fail_dump(attr);
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return should_fail_ex(attr, size, 0);
+}
EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -166,10 +180,10 @@ static int debugfs_ul_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
-static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_ul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value, &fops_ul);
+ debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +199,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
-static struct dentry *debugfs_create_stacktrace_depth(
- const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_stacktrace_depth);
+ debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,52 +215,223 @@ struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
-
- if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
- goto fail;
- if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
- goto fail;
- if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
- goto fail;
- if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
- goto fail;
- if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
- &attr->ratelimit_state.interval))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
- &attr->ratelimit_state.burst))
- goto fail;
- if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
- goto fail;
+ if (IS_ERR(dir))
+ return dir;
+
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+ debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval);
+ debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst);
+ debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
- if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
- &attr->stacktrace_depth))
- goto fail;
- if (!debugfs_create_ul("require-start", mode, dir,
- &attr->require_start))
- goto fail;
- if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
- goto fail;
- if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
- goto fail;
- if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
- goto fail;
-
+ debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth);
+ debugfs_create_xul("require-start", mode, dir, &attr->require_start);
+ debugfs_create_xul("require-end", mode, dir, &attr->require_end);
+ debugfs_create_xul("reject-start", mode, dir, &attr->reject_start);
+ debugfs_create_xul("reject-end", mode, dir, &attr->reject_end);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
attr->dname = dget(dir);
return dir;
-fail:
- debugfs_remove_recursive(dir);
-
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
+
+/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */
+
+static ssize_t fault_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_ulong_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static ssize_t fault_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));
+}
+
+static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count)
+{
+ int tmp;
+ int result;
+
+ result = kstrtoint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ atomic_set(val, tmp);
+ return count;
+}
+
+#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = _attr_name, \
+ .ca_mode = 0644, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
+ .store = _pfx##_name##_store, \
+}
+
+static struct fault_config *to_fault_config(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct fault_config, group);
+}
+
+#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \
+static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \
+} \
+static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \
+{ \
+ struct fault_config *config = to_fault_config(item); \
+ return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \
+} \
+CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME)
+
+#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \
+ FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE)
+
+FAULT_CONFIGFS_ATTR(probability, ulong);
+FAULT_CONFIGFS_ATTR(interval, ulong);
+FAULT_CONFIGFS_ATTR(times, atomic_t);
+FAULT_CONFIGFS_ATTR(space, atomic_t);
+FAULT_CONFIGFS_ATTR(verbose, ulong);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms",
+ ratelimit_state.interval, uint);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst",
+ ratelimit_state.burst, uint);
+FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool);
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
+static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page)
+{
+ return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page);
+}
+
+static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ to_fault_config(item)->attr.stacktrace_depth =
+ min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH);
+
+ return count;
+}
+
+CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth");
+
+static ssize_t fault_xul_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE,
+ sizeof(val) == sizeof(u32) ? "0x%08lx\n" : "0x%016lx\n", val);
+}
+
+static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ return fault_ulong_attr_store(val, page, count);
+}
+
+FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul);
+
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
+static struct configfs_attribute *fault_config_attrs[] = {
+ &fault_attr_probability,
+ &fault_attr_interval,
+ &fault_attr_times,
+ &fault_attr_space,
+ &fault_attr_verbose,
+ &fault_attr_ratelimit_interval,
+ &fault_attr_ratelimit_burst,
+ &fault_attr_task_filter,
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+ &fault_attr_stacktrace_depth,
+ &fault_attr_require_start,
+ &fault_attr_require_end,
+ &fault_attr_reject_start,
+ &fault_attr_reject_end,
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+ NULL,
+};
+
+static const struct config_item_type fault_config_type = {
+ .ct_attrs = fault_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+void fault_config_init(struct fault_config *config, const char *name)
+{
+ config_group_init_type_name(&config->group, name, &fault_config_type);
+}
+EXPORT_SYMBOL_GPL(fault_config_init);
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
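A brief sketch of the new should_fail_ex() entry point with FAULT_NOWARN; the fault attr and the allocation helper are illustrative:

static DECLARE_FAULT_ATTR(fail_my_alloc);

static void *my_alloc(size_t size)
{
	/* inject a failure without the usual fail_dump() log splat */
	if (should_fail_ex(&fail_my_alloc, size, FAULT_NOWARN))
		return NULL;
	return kmalloc(size, GFP_KERNEL);
}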
diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c
new file mode 100644
index 000000000000..23610bcf390b
--- /dev/null
+++ b/lib/fdt_addresses.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 5c51eb45178a..32f99e9a670e 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -15,120 +15,193 @@
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
-#include <linux/kernel.h>
-
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
- !defined(find_next_and_bit)
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/swab.h>
/*
- * This is a common helper function for find_next_bit, find_next_zero_bit, and
- * find_next_and_bit. The differences are:
- * - The "invert" argument, which is XORed with each fetched word before
- * searching it for one bits.
- * - The optional "addr2", which is anded with "addr1" if present.
+ * Common helper for find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing the found bit (may be empty)
+ * @size: The bitmap size in bits
*/
-static inline unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert)
-{
- unsigned long tmp;
-
- if (unlikely(start >= nbits))
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
-
- /* Handle 1st word. */
- tmp &= BITMAP_FIRST_WORD_MASK(start);
- start = round_down(start, BITS_PER_LONG);
+#define FIND_FIRST_BIT(FETCH, MUNGE, size) \
+({ \
+ unsigned long idx, val, sz = (size); \
+ \
+ for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
+ val = (FETCH); \
+ if (val) { \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
+ break; \
+ } \
+ } \
+ \
+ sz; \
+})
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= nbits)
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
- }
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing the found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
+
+#define FIND_NTH_BIT(FETCH, size, num) \
+({ \
+ unsigned long sz = (size), nr = (num), idx, w, tmp; \
+ \
+ for (idx = 0; (idx + 1) * BITS_PER_LONG <= sz; idx++) { \
+ if (idx * BITS_PER_LONG + nr >= sz) \
+ goto out; \
+ \
+ tmp = (FETCH); \
+ w = hweight_long(tmp); \
+ if (w > nr) \
+ goto found; \
+ \
+ nr -= w; \
+ } \
+ \
+ if (sz % BITS_PER_LONG) \
+ tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
+found: \
+ sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
+out: \
+ sz; \
+})
- return min(start + __ffs(tmp), nbits);
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(addr[idx], /* nop */, size);
}
+EXPORT_SYMBOL(_find_first_bit);
#endif
-#ifndef find_next_bit
+#ifndef find_first_and_bit
/*
- * Find the next set bit in a memory region.
+ * Find the first set bit in two memory regions.
*/
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
{
- return _find_next_bit(addr, NULL, size, offset, 0UL);
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
}
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(_find_first_and_bit);
#endif
-#ifndef find_next_zero_bit
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
- return _find_next_bit(addr, NULL, size, offset, ~0UL);
+ return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
}
-EXPORT_SYMBOL(find_next_zero_bit);
+EXPORT_SYMBOL(_find_first_zero_bit);
#endif
-#if !defined(find_next_and_bit)
-unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset)
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
{
- return _find_next_bit(addr1, addr2, size, offset, 0UL);
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
}
-EXPORT_SYMBOL(find_next_and_bit);
+EXPORT_SYMBOL(_find_next_bit);
#endif
-#ifndef find_first_bit
-/*
- * Find the first set bit in a memory region.
- */
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
- unsigned long idx;
+ return FIND_NTH_BIT(addr[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_bit);
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx])
- return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
- }
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_bit);
- return size;
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & ~addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_andnot_bit);
+
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx] & ~addr3[idx], size, n);
}
-EXPORT_SYMBOL(find_first_bit);
+EXPORT_SYMBOL(__find_nth_and_andnot_bit);
+
+#ifndef find_next_and_bit
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_and_bit);
#endif
-#ifndef find_first_zero_bit
-/*
- * Find the first cleared bit in a memory region.
- */
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+#ifndef find_next_andnot_bit
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
{
- unsigned long idx;
+ return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_andnot_bit);
+#endif
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx] != ~0UL)
- return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
- }
+#ifndef find_next_or_bit
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_or_bit);
+#endif
- return size;
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
+{
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
}
-EXPORT_SYMBOL(find_first_zero_bit);
+EXPORT_SYMBOL(_find_next_zero_bit);
#endif
#ifndef find_last_bit
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
{
if (size) {
unsigned long val = BITMAP_LAST_WORD_MASK(size);
@@ -144,73 +217,54 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
}
return size;
}
-EXPORT_SYMBOL(find_last_bit);
+EXPORT_SYMBOL(_find_last_bit);
#endif
-#ifdef __BIG_ENDIAN
-
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
- return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
-#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
-static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert)
+unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- unsigned long tmp;
-
- if (unlikely(start >= nbits))
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
+ offset = find_next_bit(addr, size, offset);
+ if (offset == size)
+ return size;
- /* Handle 1st word. */
- tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
- start = round_down(start, BITS_PER_LONG);
+ offset = round_down(offset, 8);
+ *clump = bitmap_get_value8(addr, offset);
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= nbits)
- return nbits;
+ return offset;
+}
+EXPORT_SYMBOL(find_next_clump8);
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
- }
+#ifdef __BIG_ENDIAN
- return min(start + __ffs(ext2_swab(tmp)), nbits);
+#ifndef find_first_zero_bit_le
+/*
+ * Find the first cleared bit in an LE memory region.
+ */
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(~addr[idx], swab, size);
}
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+
#endif
#ifndef find_next_zero_bit_le
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
+ return FIND_NEXT_BIT(~addr[idx], swab, size, offset);
}
-EXPORT_SYMBOL(find_next_zero_bit_le);
+EXPORT_SYMBOL(_find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
-unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
+unsigned long _find_next_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- return _find_next_bit_le(addr, NULL, size, offset, 0UL);
+ return FIND_NEXT_BIT(addr[idx], swab, size, offset);
}
-EXPORT_SYMBOL(find_next_bit_le);
+EXPORT_SYMBOL(_find_next_bit_le);
+
#endif
#endif /* __BIG_ENDIAN */
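To show what the new andnot variants buy callers, here is a hedged sketch that walks bits set in one mask but not another, assuming the find_next_andnot_bit() wrapper in <linux/find.h>; the masks and do_something() are illustrative:

unsigned long bit;

/* visit bits present in "wanted" but absent from "busy" */
for (bit = find_next_andnot_bit(wanted, busy, nbits, 0);
     bit < nbits;
     bit = find_next_andnot_bit(wanted, busy, nbits, bit + 1))
	do_something(bit);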
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5637c5711db9..d3fb09e6eff1 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -49,6 +49,25 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
return 0;
}
+static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
+{
+ static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
+ unsigned long i, cnt;
+ ktime_t time;
+
+ bitmap_copy(cp, bitmap, BITMAP_LEN);
+
+ time = ktime_get();
+ for (cnt = i = 0; i < len; cnt++) {
+ i = find_first_and_bit(cp, bitmap2, len);
+ __clear_bit(i, cp);
+ }
+ time = ktime_get() - time;
+ pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
@@ -96,6 +115,22 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len)
return 0;
}
+static int __init test_find_nth_bit(const unsigned long *bitmap, unsigned long len)
+{
+ unsigned long l, n, w = bitmap_weight(bitmap, len);
+ ktime_t time;
+
+ time = ktime_get();
+ for (n = 0; n < w; n++) {
+ l = find_nth_bit(bitmap, len, n);
+ WARN_ON(l >= len);
+ }
+ time = ktime_get() - time;
+ pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);
+
+ return 0;
+}
+
static int __init test_find_next_and_bit(const void *bitmap,
const void *bitmap2, unsigned long len)
{
@@ -123,12 +158,14 @@ static int __init find_bit_test(void)
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN / 10);
/*
* test_find_first_bit() may take some time, so
* traverse only part of bitmap to avoid soft lockup.
*/
test_find_first_bit(bitmap, BITMAP_LEN / 10);
+ test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
@@ -137,14 +174,16 @@ static int __init find_bit_test(void)
bitmap_zero(bitmap2, BITMAP_LEN);
while (nbits--) {
- __set_bit(prandom_u32() % BITMAP_LEN, bitmap);
- __set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
}
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
+ test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
/*
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 7852bfff50b1..83332fefa6f4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -63,18 +63,14 @@ void fprop_global_destroy(struct fprop_global *p)
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
- s64 events;
- unsigned long flags;
+ s64 events = percpu_counter_sum(&p->events);
- local_irq_save(flags);
- events = percpu_counter_sum(&p->events);
/*
* Don't do anything if there are no events.
*/
- if (events <= 1) {
- local_irq_restore(flags);
+ if (events <= 1)
return false;
- }
+ preempt_disable_nested();
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
@@ -82,7 +78,7 @@ bool fprop_new_period(struct fprop_global *p, int periods)
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
- local_irq_restore(flags);
+ preempt_enable_nested();
return true;
}
@@ -217,11 +213,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
}
/* Event of type pl happened */
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr)
{
fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+ percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
@@ -253,21 +250,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
}
/*
- * Like __fprop_inc_percpu() except that event is counted only if the given
+ * Like __fprop_add_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
-void __fprop_inc_percpu_max(struct fprop_global *p,
- struct fprop_local_percpu *pl, int max_frac)
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
+ s64 tmp;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
- if (numerator >
- (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+ /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+ tmp = (u64)denominator * max_frac -
+ ((u64)numerator << FPROP_FRAC_SHIFT);
+ if (tmp < 0) {
+ /* Maximum fraction already exceeded? */
return;
- } else
- fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+ /* Add just enough for the fraction to saturate */
+ nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+ FPROP_FRAC_BASE - max_frac);
+ }
+ }
+
+ __fprop_add_percpu(p, pl, nr);
}
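The clamp in __fprop_add_percpu_max() follows from requiring that the fraction stays at or below the cap after the addition: with local/global counts num/den, adding nr to both must keep (num + nr) * FPROP_FRAC_BASE <= (den + nr) * max_frac, which rearranges to nr * (FPROP_FRAC_BASE - max_frac) <= den * max_frac - num * FPROP_FRAC_BASE, i.e. the tmp value computed above; the rounded-up division then picks the smallest nr that just saturates the cap. A standalone sketch of the same arithmetic (illustrative only, plain integers rather than the percpu-counter code):

/* Illustrative clamp: increment limited so (num + nr)/(den + nr) just saturates max_frac/frac_base. */
static long long clamp_add(unsigned long num, unsigned long den, long long nr,
			   unsigned int max_frac, unsigned int frac_base)
{
	long long headroom = (long long)den * max_frac - (long long)num * frac_base;

	if (headroom < 0)
		return 0;				/* cap already exceeded, add nothing */
	if (headroom < nr * (frac_base - max_frac))	/* a full add would overshoot */
		return (headroom + frac_base - max_frac - 1) /
		       (frac_base - max_frac);		/* just enough to saturate */
	return nr;					/* full increment fits under the cap */
}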
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 37baa79cdd71..7ee468ef21ec 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,7 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
help
This is the high resolution console font for Sun machines. Say Y.
@@ -119,6 +119,12 @@ config FONT_TER16x32
This is the high resolution, large version for use with HiDPI screens.
If the standard font is unreadable for you, say Y, otherwise say N.
+config FONT_6x8
+ bool "OLED 6x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ This font is useful for small displays (OLED).
+
config FONT_AUTOSELECT
def_bool y
depends on !FONT_8x8
@@ -132,6 +138,7 @@ config FONT_AUTOSELECT
depends on !FONT_SUN12x22
depends on !FONT_10x18
depends on !FONT_TER16x32
+ depends on !FONT_6x8
select FONT_8x16
endif # FONT_SUPPORT
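A configuration that wants the new OLED font therefore needs the framebuffer console plus the font-selection menu, per the dependencies added above; an illustrative .config fragment:

CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_6x8=y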
diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile
index ed95070860de..e16f68492174 100644
--- a/lib/fonts/Makefile
+++ b/lib/fonts/Makefile
@@ -15,6 +15,7 @@ font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o
+font-objs-$(CONFIG_FONT_6x8) += font_6x8.o
font-objs += $(font-objs-y)
diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
index 532f0ff89a96..5d940db626e7 100644
--- a/lib/fonts/font_10x18.c
+++ b/lib/fonts/font_10x18.c
@@ -8,8 +8,8 @@
#define FONTDATAMAX 9216
-static const unsigned char fontdata_10x18[FONTDATAMAX] = {
-
+static const struct font_data fontdata_10x18 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
@@ -5129,8 +5129,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = {
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
-
-};
+} };
const struct font_desc font_10x18 = {
@@ -5138,7 +5137,8 @@ const struct font_desc font_10x18 = {
.name = "10x18",
.width = 10,
.height = 18,
- .data = fontdata_10x18,
+ .charcount = 256,
+ .data = fontdata_10x18.data,
#ifdef __sparc__
.pref = 5,
#else
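The { 0, 0, FONTDATAMAX, 0 } header that each converted font now carries (here in fontdata_10x18, and repeated in the fonts below) is the extra-words block of struct font_data; the third word records the size of the glyph array so consumers such as fbcon can bounds-check font buffers. A sketch of the assumed layout, the authoritative definition being the one in include/linux/font.h:

#define FONT_EXTRA_WORDS 4

struct font_data {
	unsigned int extra[FONT_EXTRA_WORDS];	/* extra[2] = FONTDATAMAX in these patches */
	const unsigned char data[];		/* glyph bitmaps follow the header */
} __packed;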
diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
index 09b2cc03435b..e65df019e0d2 100644
--- a/lib/fonts/font_6x10.c
+++ b/lib/fonts/font_6x10.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/font.h>
-static const unsigned char fontdata_6x10[] = {
+#define FONTDATAMAX 2560
+static const struct font_data fontdata_6x10 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3074,14 +3076,14 @@ static const unsigned char fontdata_6x10[] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_6x10 = {
.idx = FONT6x10_IDX,
.name = "6x10",
.width = 6,
.height = 10,
- .data = fontdata_6x10,
+ .charcount = 256,
+ .data = fontdata_6x10.data,
.pref = 0,
};
diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
index d7136c33f1f0..bd76b3f6b635 100644
--- a/lib/fonts/font_6x11.c
+++ b/lib/fonts/font_6x11.c
@@ -9,8 +9,8 @@
#define FONTDATAMAX (11*256)
-static const unsigned char fontdata_6x11[FONTDATAMAX] = {
-
+static const struct font_data fontdata_6x11 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3338,8 +3338,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_6x11 = {
@@ -3347,7 +3346,8 @@ const struct font_desc font_vga_6x11 = {
.name = "ProFont6x11",
.width = 6,
.height = 11,
- .data = fontdata_6x11,
+ .charcount = 256,
+ .data = fontdata_6x11.data,
/* Try avoiding this font if possible unless on MAC */
.pref = -2000,
};
diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c
new file mode 100644
index 000000000000..06ace7792521
--- /dev/null
+++ b/lib/fonts/font_6x8.c
@@ -0,0 +1,2577 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
+
+static const struct font_data fontdata_6x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 1 0x01 '^A' */
+ 0x78, /* 011110 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 2 0x02 '^B' */
+ 0x78, /* 011110 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x78, /* 011110 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 8 0x08 '^H' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 10 0x0A '^J' */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 11 0x0B '^K' */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x20, /* 001000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 12 0x0C '^L' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 13 0x0D '^M' */
+ 0x18, /* 000110 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 14 0x0E '^N' */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 15 0x0F '^O' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 16 0x10 '^P' */
+ 0x40, /* 010000 */
+ 0x60, /* 011000 */
+ 0x70, /* 011100 */
+ 0x78, /* 011110 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x04, /* 000001 */
+ 0x0C, /* 000011 */
+ 0x1C, /* 000111 */
+ 0x3C, /* 001111 */
+ 0x1C, /* 000111 */
+ 0x0C, /* 000011 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 18 0x12 '^R' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 19 0x13 '^S' */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+
+ /* 20 0x14 '^T' */
+ 0x3C, /* 001111 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x00, /* 000000 */
+
+ /* 21 0x15 '^U' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x30, /* 001100 */
+ 0x28, /* 001010 */
+ 0x14, /* 000101 */
+ 0x0C, /* 000011 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+
+ /* 23 0x17 '^W' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+
+ /* 24 0x18 '^X' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 26 0x1A '^Z' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 27 0x1B '^[' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 28 0x1C '^\' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 29 0x1D '^]' */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0xFC, /* 111111 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 30 0x1E '^^' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 31 0x1F '^_' */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 33 0x21 '!' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 34 0x22 '"' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 36 0x24 '$' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x30, /* 001100 */
+ 0x08, /* 000010 */
+ 0x70, /* 011100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 37 0x25 '%' */
+ 0x64, /* 011001 */
+ 0x64, /* 011001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x4C, /* 010011 */
+ 0x4C, /* 010011 */
+ 0x00, /* 000000 */
+
+ /* 38 0x26 '&' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 39 0x27 ''' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 40 0x28 '(' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 41 0x29 ')' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 42 0x2A '*' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 43 0x2B '+' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 44 0x2C ',' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 45 0x2D '-' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 46 0x2E '.' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 47 0x2F '/' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+
+ /* 48 0x30 '0' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x54, /* 010101 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 49 0x31 '1' */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x50, /* 010100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 50 0x32 '2' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 51 0x33 '3' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x18, /* 000110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 52 0x34 '4' */
+ 0x08, /* 000010 */
+ 0x18, /* 000110 */
+ 0x28, /* 001010 */
+ 0x48, /* 010010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 53 0x35 '5' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 54 0x36 '6' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 55 0x37 '7' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 56 0x38 '8' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 57 0x39 '9' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 58 0x3A ':' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 59 0x3B ';' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 60 0x3C '<' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 61 0x3D '=' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 62 0x3E '>' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 63 0x3F '?' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 64 0x40 '@' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x5C, /* 010111 */
+ 0x54, /* 010101 */
+ 0x5C, /* 010111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 65 0x41 'A' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 66 0x42 'B' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x38, /* 001110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 67 0x43 'C' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 68 0x44 'D' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 69 0x45 'E' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 70 0x46 'F' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 71 0x47 'G' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x5C, /* 010111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 72 0x48 'H' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 73 0x49 'I' */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 74 0x4A 'J' */
+ 0x1C, /* 000111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 75 0x4B 'K' */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 76 0x4C 'L' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 77 0x4D 'M' */
+ 0x44, /* 010001 */
+ 0x6C, /* 011011 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 78 0x4E 'N' */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 79 0x4F 'O' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 80 0x50 'P' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 81 0x51 'Q' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 82 0x52 'R' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 83 0x53 'S' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 84 0x54 'T' */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 85 0x55 'U' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 86 0x56 'V' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 87 0x57 'W' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x6C, /* 011011 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 88 0x58 'X' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 89 0x59 'Y' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 90 0x5A 'Z' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 91 0x5B '[' */
+ 0x18, /* 000110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 92 0x5C '\' */
+ 0x40, /* 010000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+
+ /* 93 0x5D ']' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 94 0x5E '^' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 95 0x5F '_' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+
+ /* 96 0x60 '`' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 98 0x62 'b' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 100 0x64 'd' */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 102 0x66 'f' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 104 0x68 'h' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 105 0x69 'i' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 106 0x6A 'j' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 107 0x6B 'k' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 108 0x6C 'l' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 109 0x6D 'm' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x68, /* 011010 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x00, /* 000000 */
+
+ /* 110 0x6E 'n' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 111 0x6F 'o' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 116 0x74 't' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 122 0x7A 'z' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 123 0x7B '{' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 124 0x7C '|' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 125 0x7D '}' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 126 0x7E '~' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 127 0x7F '' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 128 0x80 '\200' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+
+ /* 129 0x81 '\201' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 130 0x82 '\202' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 131 0x83 '\203' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 132 0x84 '\204' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 133 0x85 '\205' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 134 0x86 '\206' */
+ 0x3C, /* 001111 */
+ 0x18, /* 000110 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 135 0x87 '\207' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+
+ /* 136 0x88 '\210' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 137 0x89 '\211' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 138 0x8A '\212' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 139 0x8B '\213' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 140 0x8C '\214' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 141 0x8D '\215' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 142 0x8E '\216' */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 143 0x8F '\217' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 144 0x90 '\220' */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 145 0x91 '\221' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x14, /* 000101 */
+ 0x7C, /* 011111 */
+ 0x50, /* 010100 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 146 0x92 '\222' */
+ 0x3C, /* 001111 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x5C, /* 010111 */
+ 0x00, /* 000000 */
+
+ /* 147 0x93 '\223' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 148 0x94 '\224' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 149 0x95 '\225' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 150 0x96 '\226' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 151 0x97 '\227' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 152 0x98 '\230' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 153 0x99 '\231' */
+ 0x84, /* 100001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 154 0x9A '\232' */
+ 0x88, /* 100010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 155 0x9B '\233' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x50, /* 010100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 156 0x9C '\234' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x40, /* 010000 */
+ 0x70, /* 011100 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 157 0x9D '\235' */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 158 0x9E '\236' */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x5C, /* 010111 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 159 0x9F '\237' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 160 0xA0 '\240' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 161 0xA1 '\241' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 162 0xA2 '\242' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 163 0xA3 '\243' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 164 0xA4 '\244' */
+ 0x34, /* 001101 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 165 0xA5 '\245' */
+ 0x58, /* 010110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 166 0xA6 '\246' */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 167 0xA7 '\247' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 168 0xA8 '\250' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 169 0xA9 '\251' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 170 0xAA '\252' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 171 0xAB '\253' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x08, /* 000010 */
+ 0x1C, /* 000111 */
+
+ /* 172 0xAC '\254' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x58, /* 010110 */
+ 0x3C, /* 001111 */
+ 0x08, /* 000010 */
+
+ /* 173 0xAD '\255' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 174 0xAE '\256' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x00, /* 000000 */
+
+ /* 175 0xAF '\257' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x00, /* 000000 */
+
+ /* 176 0xB0 '\260' */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+
+ /* 177 0xB1 '\261' */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+
+ /* 178 0xB2 '\262' */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+
+ /* 179 0xB3 '\263' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 180 0xB4 '\264' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 181 0xB5 '\265' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 182 0xB6 '\266' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 183 0xB7 '\267' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 184 0xB8 '\270' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 185 0xB9 '\271' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 186 0xBA '\272' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 187 0xBB '\273' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 188 0xBC '\274' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 189 0xBD '\275' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 190 0xBE '\276' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 191 0xBF '\277' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 192 0xC0 '\300' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 193 0xC1 '\301' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 194 0xC2 '\302' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 195 0xC3 '\303' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 196 0xC4 '\304' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 197 0xC5 '\305' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 198 0xC6 '\306' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 199 0xC7 '\307' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 200 0xC8 '\310' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 201 0xC9 '\311' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 202 0xCA '\312' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 203 0xCB '\313' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 204 0xCC '\314' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 205 0xCD '\315' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 206 0xCE '\316' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 207 0xCF '\317' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 208 0xD0 '\320' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 209 0xD1 '\321' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 210 0xD2 '\322' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 211 0xD3 '\323' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 212 0xD4 '\324' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 213 0xD5 '\325' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 214 0xD6 '\326' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 215 0xD7 '\327' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 216 0xD8 '\330' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 217 0xD9 '\331' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 218 0xDA '\332' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 219 0xDB '\333' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 220 0xDC '\334' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 221 0xDD '\335' */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+
+ /* 222 0xDE '\336' */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+
+ /* 223 0xDF '\337' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 224 0xE0 '\340' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 225 0xE1 '\341' */
+ 0x24, /* 001001 */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+
+ /* 226 0xE2 '\342' */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 227 0xE3 '\343' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 228 0xE4 '\344' */
+ 0x7C, /* 011111 */
+ 0x24, /* 001001 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x24, /* 001001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 229 0xE5 '\345' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 230 0xE6 '\346' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x74, /* 011101 */
+ 0x40, /* 010000 */
+
+ /* 231 0xE7 '\347' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 232 0xE8 '\350' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 233 0xE9 '\351' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 234 0xEA '\352' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 235 0xEB '\353' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x18, /* 000110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 236 0xEC '\354' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 237 0xED '\355' */
+ 0x00, /* 000000 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 238 0xEE '\356' */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 239 0xEF '\357' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 240 0xF0 '\360' */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 241 0xF1 '\361' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 242 0xF2 '\362' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 243 0xF3 '\363' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 244 0xF4 '\364' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 245 0xF5 '\365' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 246 0xF6 '\366' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 247 0xF7 '\367' */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 248 0xF8 '\370' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 249 0xF9 '\371' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 250 0xFA '\372' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 251 0xFB '\373' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 252 0xFC '\374' */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 253 0xFD '\375' */
+ 0x60, /* 011000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x70, /* 011100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 254 0xFE '\376' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 255 0xFF '\377' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+} };
+
+const struct font_desc font_6x8 = {
+ .idx = FONT6x8_IDX,
+ .name = "6x8",
+ .width = 6,
+ .height = 8,
+ .charcount = 256,
+ .data = fontdata_6x8.data,
+ .pref = 0,
+};
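Consumers pick the new font up by name through the lib/fonts lookup API; a minimal sketch of fetching a glyph, assuming the usual one-byte-per-row packing for fonts up to 8 pixels wide:

#include <linux/font.h>

/* Sketch only: return the 8-byte bitmap for character 'ch' in the 6x8 font, or NULL. */
static const unsigned char *glyph_6x8(unsigned char ch)
{
	const struct font_desc *f = find_font("6x8");

	if (!f)
		return NULL;
	return (const unsigned char *)f->data + (unsigned int)ch * f->height;
}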
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 89752d0b23e8..a2f561c9fa04 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -8,8 +8,8 @@
#define FONTDATAMAX 3584
-static const unsigned char fontdata_7x14[FONTDATAMAX] = {
-
+static const struct font_data fontdata_7x14 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4105,8 +4105,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
-
-};
+} };
const struct font_desc font_7x14 = {
@@ -4114,6 +4113,7 @@ const struct font_desc font_7x14 = {
.name = "7x14",
.width = 7,
.height = 14,
- .data = fontdata_7x14,
+ .charcount = 256,
+ .data = fontdata_7x14.data,
.pref = 0,
};
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index b7ab1f5fbdb8..06ae14088514 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -10,8 +10,8 @@
#define FONTDATAMAX 4096
-static const unsigned char fontdata_8x16[FONTDATAMAX] = {
-
+static const struct font_data fontdata_8x16 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4619,8 +4619,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_8x16 = {
@@ -4628,7 +4627,8 @@ const struct font_desc font_vga_8x16 = {
.name = "VGA8x16",
.width = 8,
.height = 16,
- .data = fontdata_8x16,
+ .charcount = 256,
+ .data = fontdata_8x16.data,
.pref = 0,
};
EXPORT_SYMBOL(font_vga_8x16);
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 2328ebc8bab5..69570b8c31af 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -9,8 +9,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_8x8[FONTDATAMAX] = {
-
+static const struct font_data fontdata_8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2570,8 +2570,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_8x8 = {
@@ -2579,6 +2578,7 @@ const struct font_desc font_vga_8x8 = {
.name = "VGA8x8",
.width = 8,
.height = 8,
- .data = fontdata_8x8,
+ .charcount = 256,
+ .data = fontdata_8x8.data,
.pref = 0,
};
diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
index 0ff0e85d4481..18755c33d249 100644
--- a/lib/fonts/font_acorn_8x8.c
+++ b/lib/fonts/font_acorn_8x8.c
@@ -3,7 +3,10 @@
#include <linux/font.h>
-static const unsigned char acorndata_8x8[] = {
+#define FONTDATAMAX 2048
+
+static const struct font_data acorndata_8x8 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
/* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
@@ -260,14 +263,15 @@ static const unsigned char acorndata_8x8[] = {
/* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00,
/* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
+} };
const struct font_desc font_acorn_8x8 = {
.idx = ACORN8x8_IDX,
.name = "Acorn8x8",
.width = 8,
.height = 8,
- .data = acorndata_8x8,
+ .charcount = 256,
+ .data = acorndata_8x8.data,
#ifdef CONFIG_ARCH_ACORN
.pref = 20,
#else
diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
index 838caa1cfef7..8d39fd447952 100644
--- a/lib/fonts/font_mini_4x6.c
+++ b/lib/fonts/font_mini_4x6.c
@@ -43,8 +43,8 @@ __END__;
#define FONTDATAMAX 1536
-static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
-
+static const struct font_data fontdata_mini_4x6 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/*{*/
/* Char 0: ' ' */
0xee, /*= [*** ] */
@@ -2145,14 +2145,15 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
0xee, /*= [*** ] */
0x00, /*= [ ] */
/*}*/
-};
+} };
const struct font_desc font_mini_4x6 = {
.idx = MINI4x6_IDX,
.name = "MINI4x6",
.width = 4,
.height = 6,
- .data = fontdata_mini_4x6,
+ .charcount = 256,
+ .data = fontdata_mini_4x6.data,
.pref = 3,
};
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b15d3c342c5b..ae98ca17982e 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -3,7 +3,7 @@
/* */
/* Font file generated by cpi2fnt */
/* ------------------------------ */
-/* Combined with the alpha-numeric */
+/* Combined with the alphanumeric */
/* portion of Greg Harp's old PEARL */
/* font (from earlier versions of */
/* linux-m86k) by John Shifflett */
@@ -14,8 +14,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
-
+static const struct font_data fontdata_pearl8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2575,14 +2575,14 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_pearl_8x8 = {
.idx = PEARL8x8_IDX,
.name = "PEARL8x8",
.width = 8,
.height = 8,
- .data = fontdata_pearl8x8,
+ .charcount = 256,
+ .data = fontdata_pearl8x8.data,
.pref = 2,
};
diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
index 955d6eee3959..91daf5ab8b6b 100644
--- a/lib/fonts/font_sun12x22.c
+++ b/lib/fonts/font_sun12x22.c
@@ -3,8 +3,8 @@
#define FONTDATAMAX 11264
-static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
-
+static const struct font_data fontdata_sun12x22 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
@@ -6148,8 +6148,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
-
-};
+} };
const struct font_desc font_sun_12x22 = {
@@ -6157,7 +6156,8 @@ const struct font_desc font_sun_12x22 = {
.name = "SUN12x22",
.width = 12,
.height = 22,
- .data = fontdata_sun12x22,
+ .charcount = 256,
+ .data = fontdata_sun12x22.data,
#ifdef __sparc__
.pref = 5,
#else
diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
index 03d71e53954a..81bb4eeae04e 100644
--- a/lib/fonts/font_sun8x16.c
+++ b/lib/fonts/font_sun8x16.c
@@ -3,7 +3,8 @@
#define FONTDATAMAX 4096
-static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
+static const struct font_data fontdata_sun8x16 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
@@ -260,14 +261,15 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
/* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-};
+} };
const struct font_desc font_sun_8x16 = {
.idx = SUN8x16_IDX,
.name = "SUN8x16",
.width = 8,
.height = 16,
- .data = fontdata_sun8x16,
+ .charcount = 256,
+ .data = fontdata_sun8x16.data,
#ifdef __sparc__
.pref = 10,
#else
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
index 3f0cf1ccdf3a..5baedc573dd6 100644
--- a/lib/fonts/font_ter16x32.c
+++ b/lib/fonts/font_ter16x32.c
@@ -4,8 +4,8 @@
#define FONTDATAMAX 16384
-static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
-
+static const struct font_data fontdata_ter16x32 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
@@ -774,8 +774,8 @@ static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
- 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
- 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1169,7 +1169,7 @@ static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+ 0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
@@ -2054,8 +2054,7 @@ static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */
-
-};
+} };
const struct font_desc font_ter_16x32 = {
@@ -2063,7 +2062,8 @@ const struct font_desc font_ter_16x32 = {
.name = "TER16x32",
.width = 16,
.height = 32,
- .data = fontdata_ter16x32,
+ .charcount = 256,
+ .data = fontdata_ter16x32.data,
#ifdef __sparc__
.pref = 5,
#else
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..973866438608 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,45 @@
#endif
#include <linux/font.h>
-#define NO_FONTS
-
static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
-#undef NO_FONTS
- &font_vga_8x8,
+ &font_vga_8x8,
#endif
#ifdef CONFIG_FONT_8x16
-#undef NO_FONTS
- &font_vga_8x16,
+ &font_vga_8x16,
#endif
#ifdef CONFIG_FONT_6x11
-#undef NO_FONTS
- &font_vga_6x11,
+ &font_vga_6x11,
#endif
#ifdef CONFIG_FONT_7x14
-#undef NO_FONTS
- &font_7x14,
+ &font_7x14,
#endif
#ifdef CONFIG_FONT_SUN8x16
-#undef NO_FONTS
- &font_sun_8x16,
+ &font_sun_8x16,
#endif
#ifdef CONFIG_FONT_SUN12x22
-#undef NO_FONTS
- &font_sun_12x22,
+ &font_sun_12x22,
#endif
#ifdef CONFIG_FONT_10x18
-#undef NO_FONTS
- &font_10x18,
+ &font_10x18,
#endif
#ifdef CONFIG_FONT_ACORN_8x8
-#undef NO_FONTS
- &font_acorn_8x8,
+ &font_acorn_8x8,
#endif
#ifdef CONFIG_FONT_PEARL_8x8
-#undef NO_FONTS
- &font_pearl_8x8,
+ &font_pearl_8x8,
#endif
#ifdef CONFIG_FONT_MINI_4x6
-#undef NO_FONTS
- &font_mini_4x6,
+ &font_mini_4x6,
#endif
#ifdef CONFIG_FONT_6x10
-#undef NO_FONTS
- &font_6x10,
+ &font_6x10,
#endif
#ifdef CONFIG_FONT_TER16x32
-#undef NO_FONTS
- &font_ter_16x32,
+ &font_ter_16x32,
+#endif
+#ifdef CONFIG_FONT_6x8
+ &font_6x8,
#endif
};
@@ -90,16 +79,17 @@ static const struct font_desc *fonts[] = {
* specified font.
*
*/
-
const struct font_desc *find_font(const char *name)
{
- unsigned int i;
+ unsigned int i;
- for (i = 0; i < num_fonts; i++)
- if (!strcmp(fonts[i]->name, name))
- return fonts[i];
- return NULL;
+ BUILD_BUG_ON(!num_fonts);
+ for (i = 0; i < num_fonts; i++)
+ if (!strcmp(fonts[i]->name, name))
+ return fonts[i];
+ return NULL;
}
+EXPORT_SYMBOL(find_font);
/**
@@ -116,44 +106,46 @@ const struct font_desc *find_font(const char *name)
* chosen font.
*
*/
-
const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
u32 font_h)
{
- int i, c, cc;
- const struct font_desc *f, *g;
-
- g = NULL;
- cc = -10000;
- for(i=0; i<num_fonts; i++) {
- f = fonts[i];
- c = f->pref;
+ int i, c, cc, res;
+ const struct font_desc *f, *g;
+
+ g = NULL;
+ cc = -10000;
+ for (i = 0; i < num_fonts; i++) {
+ f = fonts[i];
+ c = f->pref;
#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
- if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
- c = 100;
+ if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
+ c = 100;
#endif
#ifdef CONFIG_FONT_6x11
- if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
- c = 100;
+ if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
+ c = 100;
#endif
#endif
- if ((yres < 400) == (f->height <= 8))
- c += 1000;
+ if ((yres < 400) == (f->height <= 8))
+ c += 1000;
+
+ /* prefer a bigger font for high resolution */
+ res = (xres / f->width) * (yres / f->height) / 1000;
+ if (res > 20)
+ c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
- c += 1000;
+ if ((font_w & (1U << (f->width - 1))) &&
+ (font_h & (1U << (f->height - 1))))
+ c += 1000;
- if (c > cc) {
- cc = c;
- g = f;
+ if (c > cc) {
+ cc = c;
+ g = f;
+ }
}
- }
- return g;
+ return g;
}
-
-EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
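Besides the font list cleanup, the fonts.c hunk above changes get_default_font() to penalise tiny fonts on large displays: it computes res = (xres / width) * (yres / height) / 1000, and anything over 20 (i.e. more than 20,000 character cells) subtracts from the preference score. A standalone sketch of just that penalty, with illustrative resolutions:

#include <stdio.h>

static int res_penalty(int xres, int yres, int w, int h)
{
	int res = (xres / w) * (yres / h) / 1000;

	return (res > 20) ? 20 - res : 0;	/* negative => small font penalised */
}

int main(void)
{
	/* a 4x6 font on a 4K panel gives hundreds of thousands of cells */
	printf("4x6  @ 3840x2160: %d\n", res_penalty(3840, 2160, 4, 6));
	/* a 16x32 font on the same panel stays under the 20k-cell threshold */
	printf("16x32 @ 3840x2160: %d\n", res_penalty(3840, 2160, 16, 32));
	return 0;
}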
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
new file mode 100644
index 000000000000..2e4fedc81621
--- /dev/null
+++ b/lib/fortify_kunit.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
+ * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ * --kconfig_add CONFIG_UBSAN=y \
+ * --kconfig_add CONFIG_UBSAN_TRAP=y \
+ * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ * --make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static char array_unknown[] = "compiler thinks I might change";
+
+static void known_sizes_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+ /* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+ const char *str;
+
+ switch (pick) {
+ case 1:
+ str = "4444";
+ break;
+ case 2:
+ str = "333";
+ break;
+ default:
+ str = "1";
+ break;
+ }
+ return __compiletime_strlen(str);
+}
+
+static void control_flow_split_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+#define KUNIT_EXPECT_BOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bos on " name "\n")
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ /* Silence "unused variable 'expected'" warning. */ \
+ KUNIT_EXPECT_EQ(test, expected, expected)
+#else
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bdos on " name "\n")
+#endif
+
+/* If the expected size is a constant value, __bos can see it. */
+#define check_const(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* If the expected size is NOT a constant value, __bos CANNOT see it. */
+#define check_dynamic(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* Assortment of constant-value kinda-edge cases. */
+#define CONST_TEST_BODY(TEST_alloc) do { \
+ /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
+ if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
+ TEST_alloc(check_const, 0, 0); \
+ TEST_alloc(check_const, 1, 1); \
+ TEST_alloc(check_const, 128, 128); \
+ TEST_alloc(check_const, 1023, 1023); \
+ TEST_alloc(check_const, 1025, 1025); \
+ TEST_alloc(check_const, 4096, 4096); \
+ TEST_alloc(check_const, 4097, 4097); \
+} while (0)
+
+static volatile size_t zero_size;
+static volatile size_t unknown_size = 50;
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define DYNAMIC_TEST_BODY(TEST_alloc) \
+ kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
+#else
+#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
+ size_t size = unknown_size; \
+ \
+ /* \
+ * Expected size is "size" in each test, before it is then \
+ * internally incremented in each test. Requires we disable \
+ * -Wunsequenced. \
+ */ \
+ TEST_alloc(check_dynamic, size, size++); \
+ /* Make sure incrementing actually happened. */ \
+ KUNIT_EXPECT_NE(test, size, unknown_size); \
+} while (0)
+#endif
+
+#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
+static void alloc_size_##allocator##_const_test(struct kunit *test) \
+{ \
+ CONST_TEST_BODY(TEST_##allocator); \
+} \
+static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+{ \
+ DYNAMIC_TEST_BODY(TEST_##allocator); \
+}
+
+#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ void *orig; \
+ size_t len; \
+ \
+ checker(expected_size, kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kzalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, __kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc(orig, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
+ kfree(p)); \
+ \
+ len = 11; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
+
+/* Sizes are in pages, not bytes. */
+#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ checker((expected_pages) * PAGE_SIZE, \
+ vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+
+/* Sizes are in pages (and open-coded for side-effects), not bytes. */
+#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ size_t prev_size; \
+ void *orig; \
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker(((expected_pages) * PAGE_SIZE) * 2, \
+ kvrealloc(orig, prev_size, \
+ ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
+
+#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ const char dev_name[] = "fortify-test"; \
+ struct device *dev; \
+ void *orig; \
+ size_t len; \
+ \
+ /* Create dummy device for devm_kmalloc()-family tests. */ \
+ dev = kunit_device_register(test, dev_name); \
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
+ "Cannot register test device\n"); \
+ \
+ checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ orig = devm_kmalloc(dev, alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ len = 4; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ kunit_device_unregister(test, dev); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+
+static struct kunit_case fortify_test_cases[] = {
+ KUNIT_CASE(known_sizes_test),
+ KUNIT_CASE(control_flow_split_test),
+ KUNIT_CASE(alloc_size_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_vmalloc_const_test),
+ KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_kvmalloc_const_test),
+ KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
+ {}
+};
+
+static struct kunit_suite fortify_test_suite = {
+ .name = "fortify",
+ .test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_LICENSE("GPL");
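fortify_kunit.c leans on the difference between __builtin_object_size(), which only resolves compile-time-constant allocation sizes, and __builtin_dynamic_object_size(), which can also follow a runtime size through an __alloc_size()-annotated allocator. A minimal user-space sketch of that distinction, assuming a compiler with __builtin_dynamic_object_size() support (Clang, or GCC 12 and later) and optimisation enabled; the allocator wrapper is purely illustrative:

#include <stdio.h>
#include <stdlib.h>

__attribute__((malloc, alloc_size(1)))
static void *my_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	volatile size_t runtime = 50;
	void *a = my_alloc(128);	/* constant size */
	void *b = my_alloc(runtime);	/* size only known at run time */

	/* with -O2, the constant case typically resolves for both builtins */
	printf("bos(a)=%zu bdos(a)=%zu\n",
	       __builtin_object_size(a, 1),
	       __builtin_dynamic_object_size(a, 1));
	/* the runtime case typically stays SIZE_MAX for __bos but 50 for __bdos */
	printf("bos(b)=%zu bdos(b)=%zu\n",
	       __builtin_object_size(b, 1),
	       __builtin_dynamic_object_size(b, 1));

	free(a);
	free(b);
	return 0;
}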
diff --git a/lib/fw_table.c b/lib/fw_table.c
new file mode 100644
index 000000000000..c3569d2ba503
--- /dev/null
+++ b/lib/fw_table.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fw_table.c - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/fw_table.h>
+
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON,
+ ACPI_SUBTABLE_HMAT,
+ ACPI_SUBTABLE_PRMT,
+ ACPI_SUBTABLE_CEDT,
+ CDAT_SUBTABLE,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
+static unsigned long __init_or_fwtbl_lib
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.type;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.type;
+ case ACPI_SUBTABLE_PRMT:
+ return 0;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.type;
+ case CDAT_SUBTABLE:
+ return entry->hdr->cdat.type;
+ }
+ return 0;
+}
+
+static unsigned long __init_or_fwtbl_lib
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.length;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.length;
+ case ACPI_SUBTABLE_PRMT:
+ return entry->hdr->prmt.length;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.length;
+ case CDAT_SUBTABLE: {
+ __le16 length = (__force __le16)entry->hdr->cdat.length;
+
+ return le16_to_cpu(length);
+ }
+ }
+ return 0;
+}
+
+static unsigned long __init_or_fwtbl_lib
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return sizeof(entry->hdr->common);
+ case ACPI_SUBTABLE_HMAT:
+ return sizeof(entry->hdr->hmat);
+ case ACPI_SUBTABLE_PRMT:
+ return sizeof(entry->hdr->prmt);
+ case ACPI_SUBTABLE_CEDT:
+ return sizeof(entry->hdr->cedt);
+ case CDAT_SUBTABLE:
+ return sizeof(entry->hdr->cdat);
+ }
+ return 0;
+}
+
+static enum acpi_subtable_type __init_or_fwtbl_lib
+acpi_get_subtable_type(char *id)
+{
+ if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+ return ACPI_SUBTABLE_HMAT;
+ if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+ return ACPI_SUBTABLE_PRMT;
+ if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+ return ACPI_SUBTABLE_CEDT;
+ if (strncmp(id, ACPI_SIG_CDAT, 4) == 0)
+ return CDAT_SUBTABLE;
+ return ACPI_SUBTABLE_COMMON;
+}
+
+static unsigned long __init_or_fwtbl_lib
+acpi_table_get_length(enum acpi_subtable_type type,
+ union fw_table_header *header)
+{
+ if (type == CDAT_SUBTABLE) {
+ __le32 length = (__force __le32)header->cdat.length;
+
+ return le32_to_cpu(length);
+ }
+
+ return header->acpi.length;
+}
+
+static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
+{
+ if (proc->handler)
+ return proc->handler(hdr, end);
+ if (proc->handler_arg)
+ return proc->handler_arg(hdr, proc->arg, end);
+ return -EINVAL;
+}
+
+/**
+ * acpi_parse_entries_array - for each proc_num find a suitable subtable
+ *
+ * @id: table id (for debugging purposes)
+ * @table_size: size of the root table
+ * @table_header: where does the table start?
+ * @proc: array of acpi_subtable_proc struct containing entry id
+ * and associated handler with it
+ * @proc_num: how big proc is?
+ * @max_entries: how many entries can we process?
+ *
+ * For each proc_num find a subtable with proc->id and run proc->handler
+ * on it. The assumption is that there's only a single handler for a particular
+ * entry id.
+ *
+ * The table_size is not the size of the complete ACPI table (the length
+ * field in the header struct), but only the size of the root table; i.e.,
+ * the offset from the very first byte of the complete ACPI table, to the
+ * first byte of the very first subtable.
+ *
+ * On success returns sum of all matching entries for all proc handlers.
+ * Otherwise, -ENODEV or -EINVAL is returned.
+ */
+int __init_or_fwtbl_lib
+acpi_parse_entries_array(char *id, unsigned long table_size,
+ union fw_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
+{
+ unsigned long table_end, subtable_len, entry_len;
+ struct acpi_subtable_entry entry;
+ enum acpi_subtable_type type;
+ int count = 0;
+ int i;
+
+ type = acpi_get_subtable_type(id);
+ table_end = (unsigned long)table_header +
+ acpi_table_get_length(type, table_header);
+
+ /* Parse all entries looking for a match. */
+
+ entry.type = type;
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)table_header + table_size);
+ subtable_len = acpi_get_subtable_header_length(&entry);
+
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
+ for (i = 0; i < proc_num; i++) {
+ if (acpi_get_entry_type(&entry) != proc[i].id)
+ continue;
+
+ if (!max_entries || count < max_entries)
+ if (call_handler(&proc[i], entry.hdr, table_end))
+ return -EINVAL;
+
+ proc[i].count++;
+ count++;
+ break;
+ }
+
+ /*
+ * If entry->length is 0, break from this loop to avoid
+ * infinite loop.
+ */
+ entry_len = acpi_get_entry_length(&entry);
+ if (entry_len == 0) {
+ pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
+ return -EINVAL;
+ }
+
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)entry.hdr + entry_len);
+ }
+
+ if (max_entries && count > max_entries) {
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, proc->id, count - max_entries, count);
+ }
+
+ return count;
+}
+
+int __init_or_fwtbl_lib
+cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg,
+ void *arg,
+ struct acpi_table_cdat *table_header)
+{
+ struct acpi_subtable_proc proc = {
+ .id = type,
+ .handler_arg = handler_arg,
+ .arg = arg,
+ };
+
+ if (!table_header)
+ return -EINVAL;
+
+ return acpi_parse_entries_array(ACPI_SIG_CDAT,
+ sizeof(struct acpi_table_cdat),
+ (union fw_table_header *)table_header,
+ &proc, 1, 0);
+}
+EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
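A hedged usage sketch for the new cdat_table_parse() entry point: the handler follows the three-argument shape that call_handler() dispatches to above, and ACPI_CDAT_TYPE_DSMAS is assumed from the ACPI CDAT type definitions; the counting helper itself is illustrative only.

/* count the DSMAS subtables present in an already-fetched CDAT */
static int count_dsmas(union acpi_subtable_headers *hdr, void *arg,
		       const unsigned long end)
{
	int *count = arg;

	(*count)++;
	return 0;		/* non-zero would abort the whole parse */
}

static int count_cdat_dsmas(struct acpi_table_cdat *cdat)
{
	int count = 0;
	int ret;

	ret = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, count_dsmas, &count, cdat);
	if (ret < 0)
		return ret;

	return count;
}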
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 094b43aef8db..55e222acd0b8 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -17,10 +17,30 @@
#include <stdio.h>
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL
static uint64_t crc64_table[256] = {0};
+static uint64_t crc64_rocksoft_table[256] = {0};
-static void generate_crc64_table(void)
+static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0ULL;
+ c = i;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ (c >> j)) & 1)
+ crc = (crc >> 1) ^ poly;
+ else
+ crc >>= 1;
+ }
+ table[i] = crc;
+ }
+}
+
+static void generate_crc64_table(uint64_t table[256], uint64_t poly)
{
uint64_t i, j, c, crc;
@@ -30,26 +50,22 @@ static void generate_crc64_table(void)
for (j = 0; j < 8; j++) {
if ((crc ^ c) & 0x8000000000000000ULL)
- crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+ crc = (crc << 1) ^ poly;
else
crc <<= 1;
c <<= 1;
}
- crc64_table[i] = crc;
+ table[i] = crc;
}
}
-static void print_crc64_table(void)
+static void output_table(uint64_t table[256])
{
int i;
- printf("/* this file is generated - do not edit */\n\n");
- printf("#include <linux/types.h>\n");
- printf("#include <linux/cache.h>\n\n");
- printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
for (i = 0; i < 256; i++) {
- printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+ printf("\t0x%016" PRIx64 "ULL", table[i]);
if (i & 0x1)
printf(",\n");
else
@@ -58,9 +74,22 @@ static void print_crc64_table(void)
printf("};\n");
}
+static void print_crc64_tables(void)
+{
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ output_table(crc64_table);
+
+ printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
+ output_table(crc64_rocksoft_table);
+}
+
int main(int argc, char *argv[])
{
- generate_crc64_table();
- print_crc64_table();
+ generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
+ generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+ print_crc64_tables();
return 0;
}
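The new generate_reflected_crc64_table() above builds a table for the bit-reversed (reflected) polynomial convention, alongside the existing big-endian-style ECMA-182 table. A standalone sketch of how such a reflected table is typically consumed — the standard reflected byte-wise update, not a copy of the kernel's crc64 helpers:

#include <stdint.h>
#include <stddef.h>

/* fold one buffer into a running reflected CRC-64 using a 256-entry table */
static uint64_t crc64_reflected(const uint64_t table[256],
				uint64_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}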
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5257f74fccf3..4fa5635bf81b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -32,7 +32,9 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
@@ -40,32 +42,30 @@ static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
return chunk->end_addr - chunk->start_addr + 1;
}
-static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+static inline int
+set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
- unsigned long val, nval;
+ unsigned long val = READ_ONCE(*addr);
- nval = *addr;
do {
- val = nval;
if (val & mask_to_set)
return -EBUSY;
cpu_relax();
- } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+ } while (!try_cmpxchg(addr, &val, val | mask_to_set));
return 0;
}
-static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+static inline int
+clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
- unsigned long val, nval;
+ unsigned long val = READ_ONCE(*addr);
- nval = *addr;
do {
- val = nval;
if ((val & mask_to_clear) != mask_to_clear)
return -EBUSY;
cpu_relax();
- } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+ } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear));
return 0;
}
@@ -81,14 +81,15 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (nr >= bits_to_set) {
if (set_bits_ll(p, mask_to_set))
return nr;
nr -= bits_to_set;
@@ -116,14 +117,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
* users clear the same bit, one user will return remain bits,
* otherwise return 0.
*/
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (nr >= bits_to_clear) {
if (clear_bits_ll(p, mask_to_clear))
return nr;
nr -= bits_to_clear;
@@ -183,8 +185,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
size_t size, int nid, void *owner)
{
struct gen_pool_chunk *chunk;
- int nbits = size >> pool->min_alloc_order;
- int nbytes = sizeof(struct gen_pool_chunk) +
+ unsigned long nbits = size >> pool->min_alloc_order;
+ unsigned long nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = vzalloc_node(nbytes, nid);
@@ -242,14 +244,14 @@ void gen_pool_destroy(struct gen_pool *pool)
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int bit, end_bit;
+ unsigned long bit, end_bit;
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
end_bit = chunk_size(chunk) >> order;
- bit = find_next_bit(chunk->bits, end_bit, 0);
+ bit = find_first_bit(chunk->bits, end_bit);
BUG_ON(bit < end_bit);
vfree(chunk);
@@ -278,7 +280,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit, remain;
+ unsigned long nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -327,21 +329,45 @@ EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address return value. Use NULL if unneeded.
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
*/
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
+ return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
+ * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
+ * usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool. Uses the
+ * given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
unsigned long vaddr;
if (!pool)
return NULL;
- vaddr = gen_pool_alloc(pool, size);
+ vaddr = gen_pool_alloc_algo(pool, size, algo, data);
if (!vaddr)
return NULL;
@@ -350,10 +376,105 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
return (void *)vaddr;
}
-EXPORT_SYMBOL(gen_pool_dma_alloc);
+EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
+
+/**
+ * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
+ * usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number bytes from the specified pool, with the given
+ * alignment restriction. Can not be used in NMI handler on architectures
+ * without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_alloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_align);
+
+/**
+ * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
+ * DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc);
+
+/**
+ * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
+ * DMA usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool. Uses
+ * the given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
+
+ if (vaddr)
+ memset(vaddr, 0, size);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
+
+/**
+ * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
+ * DMA usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool,
+ * with the given alignment restriction. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_zalloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
- * gen_pool_free - free allocated special memory back to the pool
+ * gen_pool_free_owner - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
@@ -368,7 +489,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int start_bit, nbits, remain;
+ unsigned long start_bit, nbits, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -421,7 +542,7 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
- * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * gen_pool_has_addr - checks if an address falls within the range of a pool
* @pool: the generic memory pool
* @start: start address
* @size: size of the region
@@ -429,7 +550,7 @@ EXPORT_SYMBOL(gen_pool_for_each_chunk);
* Check if the range of addresses falls within the specified pool. Returns
* true if the entire range is contained in the pool and false otherwise.
*/
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
size_t size)
{
bool found = false;
@@ -448,6 +569,7 @@ bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
rcu_read_unlock();
return found;
}
+EXPORT_SYMBOL(gen_pool_has_addr);
/**
* gen_pool_avail - get available free space of the pool
@@ -520,6 +642,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -538,6 +661,7 @@ EXPORT_SYMBOL(gen_pool_first_fit);
* @nr: The number of zeroed bits we're looking for
* @data: data for alignment
* @pool: pool to get order from
+ * @start_addr: start addr of alloction chunk
*/
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -565,6 +689,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
* @nr: The number of zeroed bits we're looking for
* @data: data for alignment
* @pool: pool to get order from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
@@ -599,6 +724,7 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
@@ -613,13 +739,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
/**
* gen_pool_best_fit - find the best fitting region of memory
- * macthing the size requirement (no alignment constraint)
+ * matching the size requirement (no alignment constraint)
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
* @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
*
* Iterate over the bitmap to find the smallest free region
* which we can allocate the memory.
@@ -635,7 +762,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
while (index < size) {
- int next_bit = find_next_bit(map, size, index + nr);
+ unsigned long next_bit = find_next_bit(map, size, index + nr);
if ((next_bit - index) < len) {
len = next_bit - index;
start_bit = index;
@@ -770,7 +897,7 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
of_property_read_string(np_pool, "label", &name);
if (!name)
- name = np_pool->name;
+ name = of_node_full_name(np_pool);
}
if (pdev)
pool = gen_pool_get(&pdev->dev, name);
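A hedged usage sketch for the new aligned/zeroed DMA helpers added to genalloc above; the pool handle, length, and 64-byte alignment are illustrative, and the caller is assumed to have created and populated the gen_pool elsewhere via the existing <linux/genalloc.h> API:

/* carve a zeroed, 64-byte-aligned block out of on-chip SRAM for a ring */
static void *descriptor_ring_alloc(struct gen_pool *sram_pool, size_t len,
				   dma_addr_t *dma)
{
	/* returns NULL if the pool is missing or cannot satisfy the request */
	return gen_pool_dma_zalloc_align(sram_pool, len, dma, 64);
}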
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index a7bafc413730..41f1bcdc4488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -1,7 +1,9 @@
+#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
+#include <linux/kmemleak.h>
#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
@@ -36,12 +38,12 @@ static inline size_t genradix_depth_size(unsigned depth)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-unsigned genradix_root_to_depth(struct genradix_root *r)
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
{
return (unsigned long) r & GENRADIX_DEPTH_MASK;
}
-struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
{
return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
}
@@ -75,6 +77,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
}
EXPORT_SYMBOL(__genradix_ptr);
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ struct genradix_node *node;
+
+ node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+ /*
+ * We're using pages (not slab allocations) directly for kernel data
+ * structures, so we need to explicitly inform kmemleak of them in order
+ * to avoid false positive memory leak reports.
+ */
+ kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+ return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kmemleak_free(node);
+ free_page((unsigned long)node);
+}
+
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
@@ -97,8 +120,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
break;
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
@@ -121,8 +143,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
n = READ_ONCE(*p);
if (!n) {
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
@@ -133,7 +154,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
}
if (new_node)
- free_page((unsigned long) new_node);
+ genradix_free_node(new_node);
return &n->data[offset];
}
@@ -146,6 +167,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
struct genradix_root *r;
struct genradix_node *n;
unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
restart:
r = READ_ONCE(radix->root);
if (!r)
@@ -164,10 +189,17 @@ restart:
(GENRADIX_ARY - 1);
while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ if (iter->offset + objs_per_ptr < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return NULL;
+ }
+
i++;
- iter->offset = round_down(iter->offset +
- genradix_depth_size(level),
- genradix_depth_size(level));
+ iter->offset = round_down(iter->offset + objs_per_ptr,
+ objs_per_ptr);
iter->pos = (iter->offset >> PAGE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
@@ -181,6 +213,64 @@ restart:
}
EXPORT_SYMBOL(__genradix_iter_peek);
+void *__genradix_iter_peek_prev(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page,
+ size_t obj_size_plus_page_remainder)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
+ iter->offset = genradix_depth_size(level);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+ }
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ iter->offset = round_down(iter->offset, objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ if (!iter->offset)
+ return NULL;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+
+ if (!i)
+ goto restart;
+ --i;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek_prev);
+
static void genradix_free_recurse(struct genradix_node *n, unsigned level)
{
if (level) {
@@ -191,7 +281,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
genradix_free_recurse(n->children[i], level - 1);
}
- free_page((unsigned long) n);
+ genradix_free_node(n);
}
int __genradix_prealloc(struct __genradix *radix, size_t size,
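The __genradix_iter_peek() change above parks the iterator at SIZE_MAX once advancing it would wrap around, and both peek helpers bail out early when they see that sentinel. A standalone illustration of the guard itself, with an arbitrary offset and step:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t offset = SIZE_MAX - 100;
	size_t step = 4096;

	if (offset + step < offset)	/* unsigned wraparound detected */
		offset = SIZE_MAX;	/* sentinel: iteration is finished */

	printf("offset parked at %zu\n", offset);
	return 0;
}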
diff --git a/lib/glob.c b/lib/glob.c
index 0ba3ea86b546..15b73f490720 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -45,7 +45,7 @@ bool __pure glob_match(char const *pat, char const *str)
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
- char const *back_pat = NULL, *back_str = back_str;
+ char const *back_pat = NULL, *back_str;
/*
* Loop over each token (character or class) in pat, matching
@@ -102,7 +102,7 @@ bool __pure glob_match(char const *pat, char const *str)
break;
case '\\':
d = *pat++;
- /*FALLTHROUGH*/
+ fallthrough;
default: /* Literal character */
literal:
if (c == d) {
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
new file mode 100644
index 000000000000..ee272c4cefcc
--- /dev/null
+++ b/lib/group_cpus.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sort.h>
+#include <linux/group_cpus.h>
+
+#ifdef CONFIG_SMP
+
+static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+ unsigned int cpus_per_grp)
+{
+ const struct cpumask *siblmsk;
+ int cpu, sibl;
+
+ for ( ; cpus_per_grp > 0; ) {
+ cpu = cpumask_first(nmsk);
+
+ /* Should not happen, but I'm too lazy to think about it */
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ cpumask_clear_cpu(cpu, nmsk);
+ cpumask_set_cpu(cpu, irqmsk);
+ cpus_per_grp--;
+
+ /* If the cpu has siblings, use them first */
+ siblmsk = topology_sibling_cpumask(cpu);
+ for (sibl = -1; cpus_per_grp > 0; ) {
+ sibl = cpumask_next(sibl, siblmsk);
+ if (sibl >= nr_cpu_ids)
+ break;
+ if (!cpumask_test_and_clear_cpu(sibl, nmsk))
+ continue;
+ cpumask_set_cpu(sibl, irqmsk);
+ cpus_per_grp--;
+ }
+ }
+}
+
+static cpumask_var_t *alloc_node_to_cpumask(void)
+{
+ cpumask_var_t *masks;
+ int node;
+
+ masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!masks)
+ return NULL;
+
+ for (node = 0; node < nr_node_ids; node++) {
+ if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+ goto out_unwind;
+ }
+
+ return masks;
+
+out_unwind:
+ while (--node >= 0)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+ return NULL;
+}
+
+static void free_node_to_cpumask(cpumask_var_t *masks)
+{
+ int node;
+
+ for (node = 0; node < nr_node_ids; node++)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+}
+
+static void build_node_to_cpumask(cpumask_var_t *masks)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+}
+
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
+ const struct cpumask *mask, nodemask_t *nodemsk)
+{
+ int n, nodes = 0;
+
+ /* Calculate the number of nodes in the supplied affinity mask */
+ for_each_node(n) {
+ if (cpumask_intersects(mask, node_to_cpumask[n])) {
+ node_set(n, *nodemsk);
+ nodes++;
+ }
+ }
+ return nodes;
+}
+
+struct node_groups {
+ unsigned id;
+
+ union {
+ unsigned ngroups;
+ unsigned ncpus;
+ };
+};
+
+static int ncpus_cmp_func(const void *l, const void *r)
+{
+ const struct node_groups *ln = l;
+ const struct node_groups *rn = r;
+
+ return ln->ncpus - rn->ncpus;
+}
+
+/*
+ * Allocate group number for each node, so that for each node:
+ *
+ * 1) the allocated number is >= 1
+ *
+ * 2) the allocated number is <= active CPU number of this node
+ *
+ * The actual allocated total groups may be less than @numgrps when
+ * active total CPU number is less than @numgrps.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_groups(unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ const nodemask_t nodemsk,
+ struct cpumask *nmsk,
+ struct node_groups *node_groups)
+{
+ unsigned n, remaining_ncpus = 0;
+
+ for (n = 0; n < nr_node_ids; n++) {
+ node_groups[n].id = n;
+ node_groups[n].ncpus = UINT_MAX;
+ }
+
+ for_each_node_mask(n, nodemsk) {
+ unsigned ncpus;
+
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ ncpus = cpumask_weight(nmsk);
+
+ if (!ncpus)
+ continue;
+ remaining_ncpus += ncpus;
+ node_groups[n].ncpus = ncpus;
+ }
+
+ numgrps = min_t(unsigned, remaining_ncpus, numgrps);
+
+ sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
+ ncpus_cmp_func, NULL);
+
+ /*
+ * Allocate groups for each node according to the ratio of this
+ * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is
+ * bigger than number of active numa nodes. Always start the
+ * allocation from the node with minimized nr_cpus.
+ *
+ * This way guarantees that each active node gets allocated at
+ * least one group, and the theory is simple: over-allocation
+ * is only done when this node is assigned by one group, so
+ * other nodes will be allocated >= 1 groups, since 'numgrps' is
+ * bigger than number of numa nodes.
+ *
+ * One perfect invariant is that number of allocated groups for
+ * each node is <= CPU count of this node:
+ *
+ * 1) suppose there are two nodes: A and B
+ * ncpu(X) is CPU count of node X
+ * grps(X) is the group count allocated to node X via this
+ * algorithm
+ *
+ * ncpu(A) <= ncpu(B)
+ * ncpu(A) + ncpu(B) = N
+ * grps(A) + grps(B) = G
+ *
+ * grps(A) = max(1, round_down(G * ncpu(A) / N))
+ * grps(B) = G - grps(A)
+ *
+ * both N and G are integer, and 2 <= G <= N, suppose
+ * G = N - delta, and 0 <= delta <= N - 2
+ *
+ * 2) obviously grps(A) <= ncpu(A) because:
+ *
+ * if grps(A) is 1, then grps(A) <= ncpu(A) given
+ * ncpu(A) >= 1
+ *
+ * otherwise,
+ * grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
+ *
+ * 3) prove how grps(B) <= ncpu(B):
+ *
+ * if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
+ * over-allocated, so grps(B) <= ncpu(B),
+ *
+ * otherwise:
+ *
+ * grps(A) =
+ * round_down(G * ncpu(A) / N) =
+ * round_down((N - delta) * ncpu(A) / N) =
+ * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
+ * round_down((N * ncpu(A) - delta * N) / N) =
+ * cpu(A) - delta
+ *
+ * then:
+ *
+ * grps(A) - G >= ncpu(A) - delta - G
+ * =>
+ * G - grps(A) <= G + delta - ncpu(A)
+ * =>
+ * grps(B) <= N - ncpu(A)
+ * =>
+ * grps(B) <= cpu(B)
+ *
+ * For nodes >= 3, it can be thought as one node and another big
+ * node given that is exactly what this algorithm is implemented,
+ * and we always re-calculate 'remaining_ncpus' & 'numgrps', and
+ * finally for each node X: grps(X) <= ncpu(X).
+ *
+ */
+ for (n = 0; n < nr_node_ids; n++) {
+ unsigned ngroups, ncpus;
+
+ if (node_groups[n].ncpus == UINT_MAX)
+ continue;
+
+ WARN_ON_ONCE(numgrps == 0);
+
+ ncpus = node_groups[n].ncpus;
+ ngroups = max_t(unsigned, 1,
+ numgrps * ncpus / remaining_ncpus);
+ WARN_ON_ONCE(ngroups > ncpus);
+
+ node_groups[n].ngroups = ngroups;
+
+ remaining_ncpus -= ncpus;
+ numgrps -= ngroups;
+ }
+}
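To make the ratio allocation proved in the comment above concrete, here is a small standalone computation with two illustrative nodes (ncpu(A) = 8, ncpu(B) = 16) and G = 6 groups, walking the nodes smallest-first exactly as the loop above does:

#include <stdio.h>

int main(void)
{
	unsigned remaining_ncpus = 24;		/* ncpu(A) + ncpu(B) */
	unsigned numgrps = 6;			/* G */
	unsigned ncpus[2] = { 8, 16 };		/* already sorted by size */

	for (int n = 0; n < 2; n++) {
		unsigned ngroups = numgrps * ncpus[n] / remaining_ncpus;

		if (ngroups < 1)
			ngroups = 1;
		/* node 0: 6*8/24 = 2 groups; node 1: 4*16/16 = 4 groups */
		printf("node %d: %u cpus -> %u groups\n", n, ncpus[n], ngroups);
		remaining_ncpus -= ncpus[n];
		numgrps -= ngroups;
	}
	return 0;
}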
+
+static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk, struct cpumask *masks)
+{
+ unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
+ unsigned int last_grp = numgrps;
+ unsigned int curgrp = startgrp;
+ nodemask_t nodemsk = NODE_MASK_NONE;
+ struct node_groups *node_groups;
+
+ if (cpumask_empty(cpu_mask))
+ return 0;
+
+ nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
+
+ /*
+ * If the number of nodes in the mask is greater than or equal to the
+ * number of groups, just spread the groups across the nodes.
+ */
+ if (numgrps <= nodes) {
+ for_each_node_mask(n, nodemsk) {
+ /* Ensure that only CPUs which are in both masks are set */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
+ if (++curgrp == last_grp)
+ curgrp = 0;
+ }
+ return numgrps;
+ }
+
+ node_groups = kcalloc(nr_node_ids,
+ sizeof(struct node_groups),
+ GFP_KERNEL);
+ if (!node_groups)
+ return -ENOMEM;
+
+ /* allocate group number for each node */
+ alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
+ nodemsk, nmsk, node_groups);
+ for (i = 0; i < nr_node_ids; i++) {
+ unsigned int ncpus, v;
+ struct node_groups *nv = &node_groups[i];
+
+ if (nv->ngroups == UINT_MAX)
+ continue;
+
+ /* Get the cpus on this node which are in the mask */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
+ ncpus = cpumask_weight(nmsk);
+ if (!ncpus)
+ continue;
+
+ WARN_ON_ONCE(nv->ngroups > ncpus);
+
+ /* Account for rounding errors */
+ extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
+
+ /* Spread allocated groups on CPUs of the current node */
+ for (v = 0; v < nv->ngroups; v++, curgrp++) {
+ cpus_per_grp = ncpus / nv->ngroups;
+
+ /* Account for extra groups to compensate rounding errors */
+ if (extra_grps) {
+ cpus_per_grp++;
+ --extra_grps;
+ }
+
+ /*
+ * wrapping has to be considered given 'startgrp'
+ * may start anywhere
+ */
+ if (curgrp >= last_grp)
+ curgrp = 0;
+ grp_spread_init_one(&masks[curgrp], nmsk,
+ cpus_per_grp);
+ }
+ done += nv->ngroups;
+ }
+ kfree(node_groups);
+ return done;
+}
+
+/**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ *
+ * Return: cpumask array if successful, NULL otherwise. Each element of
+ * the array holds the CPUs assigned to that group.
+ *
+ * Try to put CPUs that are close from the viewpoint of CPU and NUMA
+ * locality into the same group, using a two-stage grouping:
+ * 1) spread the present CPUs across the groups evenly first
+ * 2) spread the remaining possible CPUs across the groups evenly
+ *
+ * The resulting grouping guarantees that all CPUs are covered and that
+ * no CPU is assigned to more than one group.
+ */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
+ cpumask_var_t *node_to_cpumask;
+ cpumask_var_t nmsk, npresmsk;
+ int ret = -ENOMEM;
+ struct cpumask *masks = NULL;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto fail_nmsk;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto fail_npresmsk;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ goto fail_node_to_cpumask;
+
+ build_node_to_cpumask(node_to_cpumask);
+
+ /*
+ * Make a local cache of 'cpu_present_mask' so that both spreading
+ * stages observe a consistent view of it without holding the CPU
+ * hotplug lock, which reduces the deadlock risk with the CPU hotplug
+ * code.
+ *
+ * A CPU hotplug event may still race with this read of
+ * 'cpu_present_mask'; we can live with that because it only determines
+ * whether the hotplugged CPU is handled in the 1st or the 2nd stage,
+ * and either way is correct from the API user's viewpoint since the
+ * 2-stage spread is merely an optimization.
+ */
+ cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
+ /* grouping present CPUs first */
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret < 0)
+ goto fail_build_affinity;
+ nr_present = ret;
+
+ /*
+ * Allocate the non-present CPUs starting from the next group to be
+ * handled. If the grouping of present CPUs already exhausted the
+ * group space, assign the non-present CPUs to the groups that were
+ * already handed out.
+ */
+ if (nr_present >= numgrps)
+ curgrp = 0;
+ else
+ curgrp = nr_present;
+ cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret >= 0)
+ nr_others = ret;
+
+ fail_build_affinity:
+ if (ret >= 0)
+ WARN_ON(nr_present + nr_others < numgrps);
+
+ fail_node_to_cpumask:
+ free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
+ free_cpumask_var(npresmsk);
+
+ fail_nmsk:
+ free_cpumask_var(nmsk);
+ if (ret < 0) {
+ kfree(masks);
+ return NULL;
+ }
+ return masks;
+}
+#else /* CONFIG_SMP */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+
+ if (!masks)
+ return NULL;
+
+ /* assign all CPUs (i.e. CPU 0) to the 1st group only */
+ cpumask_copy(&masks[0], cpu_possible_mask);
+ return masks;
+}
+#endif /* CONFIG_SMP */
+EXPORT_SYMBOL_GPL(group_cpus_evenly);
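
As a quick illustration of the proportional allocation proved correct in the
alloc_nodes_groups() comment above, here is a minimal user-space sketch; the
two-node CPU counts and the group count are made-up example values, not taken
from this patch:

#include <stdio.h>

int main(void)
{
	/* hypothetical layout: node 0 has 2 CPUs, node 1 has 6, 4 groups to place */
	unsigned int ncpus[2] = { 2, 6 };	/* sorted by CPU count, as the kernel does */
	unsigned int remaining_ncpus = 8, numgrps = 4;

	for (int n = 0; n < 2; n++) {
		/* grps(X) = max(1, round_down(numgrps * ncpu(X) / remaining_ncpus)) */
		unsigned int ngroups = numgrps * ncpus[n] / remaining_ncpus;

		if (ngroups < 1)
			ngroups = 1;
		printf("node %d: ncpus=%u groups=%u\n", n, ncpus[n], ngroups);
		remaining_ncpus -= ncpus[n];
		numgrps -= ngroups;
	}
	return 0;
}

Running it prints 1 group for the small node and 3 for the large one, matching
the invariant grps(X) <= ncpu(X) for both nodes.
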
diff --git a/lib/hashtable_test.c b/lib/hashtable_test.c
new file mode 100644
index 000000000000..1d1b3288dee2
--- /dev/null
+++ b/lib/hashtable_test.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the Kernel Hashtable structures.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/hashtable.h>
+
+struct hashtable_test_entry {
+ int key;
+ int data;
+ struct hlist_node node;
+ int visited;
+};
+
+static void hashtable_test_hash_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a hashtable. */
+ DEFINE_HASHTABLE(hash1, 2);
+ DECLARE_HASHTABLE(hash2, 3);
+
+ /* When using DECLARE_HASHTABLE, hash_init() must be called to
+ * initialize the hashtable.
+ */
+ hash_init(hash2);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash1));
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash2));
+}
+
+static void hashtable_test_hash_empty(struct kunit *test)
+{
+ struct hashtable_test_entry a;
+ DEFINE_HASHTABLE(hash, 1);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+
+ /* Hashtable should no longer be empty. */
+ KUNIT_EXPECT_FALSE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_hashed(struct kunit *test)
+{
+ struct hashtable_test_entry a, b;
+ DEFINE_HASHTABLE(hash, 4);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 1;
+ b.data = 2;
+ hash_add(hash, &b.node, b.key);
+
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&a.node));
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&b.node));
+}
+
+static void hashtable_test_hash_add(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ int bkt;
+ DEFINE_HASHTABLE(hash, 3);
+
+ a.key = 1;
+ a.data = 13;
+ a.visited = 0;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_for_each(hash, bkt, x, node) {
+ x->visited++;
+ if (x->key == a.key)
+ KUNIT_EXPECT_EQ(test, x->data, 13);
+ else if (x->key == b.key)
+ KUNIT_EXPECT_EQ(test, x->data, 10);
+ else
+ KUNIT_FAIL(test, "Unexpected key in hashtable.");
+ }
+
+ /* Both entries should have been visited exactly once. */
+ KUNIT_EXPECT_EQ(test, a.visited, 1);
+ KUNIT_EXPECT_EQ(test, b.visited, 1);
+}
+
+static void hashtable_test_hash_del(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ DEFINE_HASHTABLE(hash, 6);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_del(&b.node);
+ hash_for_each_possible(hash, x, node, b.key) {
+ x->visited++;
+ KUNIT_EXPECT_NE(test, x->key, b.key);
+ }
+
+ /* The deleted entry should not have been visited. */
+ KUNIT_EXPECT_EQ(test, b.visited, 0);
+
+ hash_del(&a.node);
+
+ /* The hashtable should be empty. */
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_for_each(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each(hash, bkt, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ struct hlist_node *tmp;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each_safe(hash, bkt, tmp, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_possible(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible(hash, x, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+ /* If entry with key = 1 is in the same bucket as the entries with
+ * key = 0, check it was visited. Otherwise ensure that only three
+ * entries were visited.
+ */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static void hashtable_test_hash_for_each_possible_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ struct hlist_node *tmp;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible_safe(hash, x, tmp, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+ /* If entry with key = 1 is in the same bucket as the entries with
+ * key = 0, check it was visited. Otherwise ensure that only three
+ * entries were visited.
+ */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static struct kunit_case hashtable_test_cases[] = {
+ KUNIT_CASE(hashtable_test_hash_init),
+ KUNIT_CASE(hashtable_test_hash_empty),
+ KUNIT_CASE(hashtable_test_hash_hashed),
+ KUNIT_CASE(hashtable_test_hash_add),
+ KUNIT_CASE(hashtable_test_hash_del),
+ KUNIT_CASE(hashtable_test_hash_for_each),
+ KUNIT_CASE(hashtable_test_hash_for_each_safe),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible_safe),
+ {},
+};
+
+static struct kunit_suite hashtable_test_module = {
+ .name = "hashtable",
+ .test_cases = hashtable_test_cases,
+};
+
+kunit_test_suites(&hashtable_test_module);
+
+MODULE_LICENSE("GPL");
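
For readers unfamiliar with <linux/hashtable.h>, the pattern the test cases
above exercise boils down to the following kernel-style fragment. It is a
sketch only; 'example_table', 'struct item' and example_usage() are
hypothetical names, not part of the patch:

#include <linux/hashtable.h>
#include <linux/printk.h>

struct item {
	int key;
	int data;
	struct hlist_node node;
};

/* 2^3 = 8 buckets, statically initialised */
static DEFINE_HASHTABLE(example_table, 3);

static void example_usage(void)
{
	static struct item a = { .key = 1, .data = 13 };
	struct item *cur;

	/* insert, keyed by a.key (collisions are chained per bucket) */
	hash_add(example_table, &a.node, a.key);

	/* walk only the bucket that key 1 hashes to */
	hash_for_each_possible(example_table, cur, node, 1)
		if (cur->key == 1)
			pr_info("found data=%d\n", cur->data);

	/* remove the entry again; the table is now empty */
	hash_del(&a.node);
}
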
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b1d55b669ae2..06833d404398 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -7,6 +7,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/unaligned.h>
@@ -21,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
*/
-int hex_to_bin(char ch)
+int hex_to_bin(unsigned char ch)
{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- ch = tolower(ch);
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- return -1;
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}
EXPORT_SYMBOL(hex_to_bin);
@@ -44,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- int hi = hex_to_bin(*src++);
- int lo = hex_to_bin(*src++);
+ int hi, lo;
- if ((hi < 0) || (lo < 0))
+ hi = hex_to_bin(*src++);
+ if (unlikely(hi < 0))
+ return -EINVAL;
+ lo = hex_to_bin(*src++);
+ if (unlikely(lo < 0))
return -EINVAL;
*dst++ = (hi << 4) | lo;
@@ -270,25 +292,4 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
}
EXPORT_SYMBOL(print_hex_dump);
-#if !defined(CONFIG_DYNAMIC_DEBUG)
-/**
- * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
- * @prefix_str: string to prefix each line with;
- * caller supplies trailing spaces for alignment if desired
- * @prefix_type: controls whether prefix of an offset, address, or none
- * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
- * @buf: data blob to dump
- * @len: number of bytes in the @buf
- *
- * Calls print_hex_dump(), with log level of KERN_DEBUG,
- * rowsize of 16, groupsize of 1, and ASCII output included.
- */
-void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len)
-{
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
- buf, len, true);
-}
-EXPORT_SYMBOL(print_hex_dump_bytes);
-#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
#endif /* defined(CONFIG_PRINTK) */
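
The branchless hex_to_bin() above is easy to sanity-check outside the kernel;
the following stand-alone user-space program (not part of the patch) evaluates
the same expression for a few characters:

#include <stdio.h>

/* same constant-time expression as the patched hex_to_bin() */
static int hex_to_bin_demo(unsigned char ch)
{
	unsigned char cu = ch & 0xdf;	/* fold lowercase onto uppercase */

	return -1 +
	       ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
	       ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}

int main(void)
{
	/* '7' -> 7, 'b' -> 11, 'F' -> 15, 'g' -> -1 (invalid) */
	printf("%d %d %d %d\n", hex_to_bin_demo('7'), hex_to_bin_demo('b'),
	       hex_to_bin_demo('F'), hex_to_bin_demo('g'));
	return 0;
}

It prints "7 11 15 -1": the digit values for '7', 'b' and 'F', and -1 for the
invalid character 'g', without ever branching on the input.
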
diff --git a/lib/idr.c b/lib/idr.c
index 66a374892482..da36054c3ca0 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
EXPORT_SYMBOL(idr_for_each);
/**
- * idr_get_next() - Find next populated entry.
+ * idr_get_next_ul() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next(struct idr *idr, int *nextid)
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
}
if (!slot)
return NULL;
- id = iter.index + base;
-
- if (WARN_ON_ONCE(id > INT_MAX))
- return NULL;
- *nextid = id;
+ *nextid = iter.index + base;
return entry;
}
-EXPORT_SYMBOL(idr_get_next);
+EXPORT_SYMBOL(idr_get_next_ul);
/**
- * idr_get_next_ul() - Find next populated entry.
+ * idr_get_next() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
-void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+void *idr_get_next(struct idr *idr, int *nextid)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned long base = idr->idr_base;
unsigned long id = *nextid;
+ void *entry = idr_get_next_ul(idr, &id);
- id = (id < base) ? 0 : id - base;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
- if (!slot)
+ if (WARN_ON_ONCE(id > INT_MAX))
return NULL;
-
- *nextid = iter.index + base;
- return rcu_dereference_raw(*slot);
+ *nextid = id;
+ return entry;
}
-EXPORT_SYMBOL(idr_get_next_ul);
+EXPORT_SYMBOL(idr_get_next);
/**
* idr_replace() - replace pointer for given ID.
@@ -381,7 +372,8 @@ EXPORT_SYMBOL(idr_replace);
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -479,6 +471,7 @@ alloc:
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
+ kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
@@ -488,7 +481,8 @@ EXPORT_SYMBOL(ida_alloc_range);
* @ida: IDA handle.
* @id: Previously allocated ID.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
@@ -497,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id)
struct ida_bitmap *bitmap;
unsigned long flags;
- BUG_ON((int)id < 0);
+ if ((int)id < 0)
+ return;
xas_lock_irqsave(&xas, flags);
bitmap = xas_load(&xas);
@@ -513,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
- if (!test_bit(bit, bitmap->bitmap))
+ if (!bitmap || !test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
@@ -540,7 +535,8 @@ EXPORT_SYMBOL(ida_free);
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_destroy(struct ida *ida)
{
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index 593ce56ece50..3412737ff365 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -15,3 +15,135 @@ EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
+ * span of nodes. This makes nodes[0]->last the end of that contiguous span of
+ * used indexes, which started at the original nodes[1]->start. nodes[1] is now the
+ * first node starting the next used span. A hole span is between nodes[0]->last
+ * and nodes[1]->start. nodes[1] must be !NULL.
+ */
+static void
+interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
+{
+ struct interval_tree_node *cur = state->nodes[1];
+
+ state->nodes[0] = cur;
+ do {
+ if (cur->last > state->nodes[0]->last)
+ state->nodes[0] = cur;
+ cur = interval_tree_iter_next(cur, state->first_index,
+ state->last_index);
+ } while (cur && (state->nodes[0]->last >= cur->start ||
+ state->nodes[0]->last + 1 == cur->start));
+ state->nodes[1] = cur;
+}
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index)
+{
+ iter->first_index = first_index;
+ iter->last_index = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] =
+ interval_tree_iter_first(itree, first_index, last_index);
+ if (!iter->nodes[1]) {
+ /* No nodes intersect the span, whole span is hole */
+ iter->start_hole = first_index;
+ iter->last_hole = last_index;
+ iter->is_hole = 1;
+ return;
+ }
+ if (iter->nodes[1]->start > first_index) {
+ /* Leading hole on first iteration */
+ iter->start_hole = first_index;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+ return;
+ }
+
+ /* Starting inside a used range */
+ iter->start_used = first_index;
+ iter->is_hole = 0;
+ interval_tree_span_iter_next_gap(iter);
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= last_index) {
+ iter->last_used = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_first);
+
+void interval_tree_span_iter_next(struct interval_tree_span_iter *iter)
+{
+ if (!iter->nodes[0] && !iter->nodes[1]) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ if (iter->is_hole) {
+ iter->start_used = iter->last_hole + 1;
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= iter->last_index) {
+ iter->last_used = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+ iter->is_hole = 0;
+ return;
+ }
+
+ if (!iter->nodes[1]) {
+ /* Trailing hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->is_hole = 1;
+ return;
+ }
+
+ /* must have both nodes[0] and [1], interior hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_next);
+
+/*
+ * Advance the iterator index to a specific position. The reported used/hole span is
+ * updated to start at new_index. This is faster than calling
+ * interval_tree_span_iter_first() as it can avoid full searches in several
+ * cases where the iterator is already set.
+ */
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index)
+{
+ if (iter->is_hole == -1)
+ return;
+
+ iter->first_index = new_index;
+ if (new_index > iter->last_index) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ /* Rely on the union aliasing hole/used */
+ if (iter->start_hole <= new_index && new_index <= iter->last_hole) {
+ iter->start_hole = new_index;
+ return;
+ }
+ if (new_index == iter->last_hole + 1)
+ interval_tree_span_iter_next(iter);
+ else
+ interval_tree_span_iter_first(iter, itree, new_index,
+ iter->last_index);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_advance);
+#endif
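
To visualise the used/hole alternation the span iterator reports, here is a
small user-space model. It walks a hand-made, already merged list of
[start, last] intervals; it does not use the kernel interval tree, and the
values are invented for illustration:

#include <stdio.h>

struct span { unsigned long start, last; };

int main(void)
{
	/* pretend these are the tree's intervals, coalesced the way
	 * interval_tree_span_iter_next_gap() coalesces adjacent nodes */
	struct span used[] = { { 3, 5 }, { 10, 15 }, { 20, 25 } };
	unsigned long first = 0, last = 30, pos = first;

	for (int i = 0; i < 3; i++) {
		if (pos < used[i].start)	/* hole before this used span */
			printf("hole: %lu-%lu\n", pos, used[i].start - 1);
		printf("used: %lu-%lu\n", used[i].start, used[i].last);
		pos = used[i].last + 1;
	}
	if (pos <= last)			/* trailing hole */
		printf("hole: %lu-%lu\n", pos, last);
	return 0;
}

The output alternates hole 0-2, used 3-5, hole 6-9, used 10-15, hole 16-19,
used 20-25, hole 26-30, which is the sequence interval_tree_span_iter_first()
and _next() would hand back for such a tree and query range.
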
diff --git a/lib/iomap.c b/lib/iomap.c
index e909ab71e995..4f8b31baa575 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -6,6 +6,7 @@
*/
#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/kmsan-checks.h>
#include <linux/export.h>
@@ -70,27 +71,36 @@ static void bad_io_access(unsigned long port, const char *access)
#define mmio_read64be(addr) swab64(readq(addr))
#endif
-unsigned int ioread8(void __iomem *addr)
+/*
+ * Here and below, we apply __no_kmsan_checks to functions reading data from
+ * hardware, to ensure that KMSAN marks their return values as initialized.
+ */
+__no_kmsan_checks
+unsigned int ioread8(const void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
return 0xff;
}
-unsigned int ioread16(void __iomem *addr)
+__no_kmsan_checks
+unsigned int ioread16(const void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
return 0xffff;
}
-unsigned int ioread16be(void __iomem *addr)
+__no_kmsan_checks
+unsigned int ioread16be(const void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
return 0xffff;
}
-unsigned int ioread32(void __iomem *addr)
+__no_kmsan_checks
+unsigned int ioread32(const void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
return 0xffffffff;
}
-unsigned int ioread32be(void __iomem *addr)
+__no_kmsan_checks
+unsigned int ioread32be(const void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
return 0xffffffff;
@@ -142,26 +152,30 @@ static u64 pio_read64be_hi_lo(unsigned long port)
return lo | (hi << 32);
}
-u64 ioread64_lo_hi(void __iomem *addr)
+__no_kmsan_checks
+u64 ioread64_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
return 0xffffffffffffffffULL;
}
-u64 ioread64_hi_lo(void __iomem *addr)
+__no_kmsan_checks
+u64 ioread64_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
return 0xffffffffffffffffULL;
}
-u64 ioread64be_lo_hi(void __iomem *addr)
+__no_kmsan_checks
+u64 ioread64be_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_lo_hi(port),
return mmio_read64be(addr));
return 0xffffffffffffffffULL;
}
-u64 ioread64be_hi_lo(void __iomem *addr)
+__no_kmsan_checks
+u64 ioread64be_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_hi_lo(port),
return mmio_read64be(addr));
@@ -188,22 +202,32 @@ EXPORT_SYMBOL(ioread64be_hi_lo);
void iowrite8(u8 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outb(val,port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outw(val,port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outl(val,port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
@@ -239,24 +263,32 @@ static void pio_write64be_hi_lo(u64 val, unsigned long port)
void iowrite64_lo_hi(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_lo_hi(val, port),
writeq(val, addr));
}
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_hi_lo(val, port),
writeq(val, addr));
}
void iowrite64be_lo_hi(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_lo_hi(val, port),
mmio_write64be(val, addr));
}
void iowrite64be_hi_lo(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_hi_lo(val, port),
mmio_write64be(val, addr));
}
@@ -275,7 +307,7 @@ EXPORT_SYMBOL(iowrite64be_hi_lo);
* order" (we also don't have IO barriers).
*/
#ifndef mmio_insb
-static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
@@ -283,7 +315,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
dst++;
}
}
-static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
@@ -291,7 +323,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
dst++;
}
}
-static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
@@ -325,17 +357,23 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
}
#endif
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count);
}
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 2);
}
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 4);
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
@@ -343,14 +381,20 @@ EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count);
IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 2);
IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 4);
IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
diff --git a/lib/ioremap.c b/lib/ioremap.c
deleted file mode 100644
index a95161d9c883..000000000000
--- a/lib/ioremap.c
+++ /dev/null
@@ -1,231 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
-
-static int __init set_nohugeiomap(char *str)
-{
- ioremap_huge_disabled = 1;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
- if (!ioremap_huge_disabled) {
- if (arch_ioremap_pud_supported())
- ioremap_pud_capable = 1;
- if (arch_ioremap_pmd_supported())
- ioremap_pmd_capable = 1;
- }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
- return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
- return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
- return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel(pmd, addr);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc(&init_mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc(&init_mm, p4d, addr);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc(&init_mm, pgd, addr);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
-
- return err;
-}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f99c41d4eb54..cf2eb2b2f983 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,690 +1,224 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
-#include <net/checksum.h>
+#include <linux/compat.h>
#include <linux/scatterlist.h>
+#include <linux/instrumented.h>
+#include <linux/iov_iter.h>
-#define PIPE_PARANOIA /* for now */
-
-#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
- size_t left; \
- size_t wanted = n; \
- __p = i->iov; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } else { \
- left = 0; \
- } \
- while (unlikely(!left && n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted - n; \
-}
-
-#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
- size_t wanted = n; \
- __p = i->kvec; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- (void)(STEP); \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } \
- while (unlikely(n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- (void)(STEP); \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted; \
-}
-
-#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
- struct bvec_iter __start; \
- __start.bi_size = n; \
- __start.bi_bvec_done = skip; \
- __start.bi_idx = 0; \
- for_each_bvec(__v, i->bvec, __bi, __start) { \
- if (!__v.bv_len) \
- continue; \
- (void)(STEP); \
- } \
-}
-
-#define iterate_all_kinds(i, n, v, I, B, K) { \
- if (likely(n)) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- } \
- } \
-}
-
-#define iterate_and_advance(i, n, v, I, B, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (i->count) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- const struct bio_vec *bvec = i->bvec; \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
- i->nr_segs -= i->bvec - bvec; \
- skip = __bi.bi_bvec_done; \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- if (skip == kvec->iov_len) { \
- kvec++; \
- skip = 0; \
- } \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- skip += n; \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- if (skip == iov->iov_len) { \
- iov++; \
- skip = 0; \
- } \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
- } \
- i->count -= n; \
- i->iov_offset = skip; \
- } \
-}
-
-static int copyout(void __user *to, const void *from, size_t n)
-{
- if (access_ok(to, n)) {
- kasan_check_read(from, n);
- n = raw_copy_to_user(to, from, n);
- }
- return n;
-}
-
-static int copyin(void *to, const void __user *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- if (access_ok(from, n)) {
- kasan_check_write(to, n);
- n = raw_copy_from_user(to, from, n);
+ if (should_fail_usercopy())
+ return len;
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = raw_copy_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+static __always_inline
+size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *from;
+ ssize_t res;
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
- kaddr = kmap_atomic(page);
- from = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = from - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- from = kaddr + offset;
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- kunmap(page);
+ if (should_fail_usercopy())
+ return len;
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
+ from += progress;
+ res = copy_to_user_nofault(iter_to, from, len);
+ return res < 0 ? len : res;
}
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+static __always_inline
+size_t copy_from_user_iter(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *to;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
- kaddr = kmap_atomic(page);
- to = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = to - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
+ size_t res = len;
+
+ if (should_fail_usercopy())
+ return len;
+ if (access_ok(iter_from, len)) {
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
}
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- to = kaddr + offset;
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- kunmap(page);
-
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
-}
-
-#ifdef PIPE_PARANOIA
-static bool sanity(const struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
- int next = pipe->curbuf + pipe->nrbufs;
- if (i->iov_offset) {
- struct pipe_buffer *p;
- if (unlikely(!pipe->nrbufs))
- goto Bad; // pipe must be non-empty
- if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
- goto Bad; // must be at the last buffer...
-
- p = &pipe->bufs[idx];
- if (unlikely(p->offset + p->len != i->iov_offset))
- goto Bad; // ... at the end of segment
- } else {
- if (idx != (next & (pipe->buffers - 1)))
- goto Bad; // must be right after the last buffer
- }
- return true;
-Bad:
- printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
- printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
- pipe->curbuf, pipe->nrbufs, pipe->buffers);
- for (idx = 0; idx < pipe->buffers; idx++)
- printk(KERN_ERR "[%p %p %d %d]\n",
- pipe->bufs[idx].ops,
- pipe->bufs[idx].page,
- pipe->bufs[idx].offset,
- pipe->bufs[idx].len);
- WARN_ON(1);
- return false;
-}
-#else
-#define sanity(i) true
-#endif
+ return res;
+}
-static inline int next_idx(int idx, struct pipe_inode_info *pipe)
+static __always_inline
+size_t memcpy_to_iter(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- return (idx + 1) & (pipe->buffers - 1);
+ memcpy(iter_to, from + progress, len);
+ return 0;
}
-static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+static __always_inline
+size_t memcpy_from_iter(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- size_t off;
- int idx;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- if (!sanity(i))
- return 0;
+ memcpy(to + progress, iter_from, len);
+ return 0;
+}
- off = i->iov_offset;
- idx = i->idx;
- buf = &pipe->bufs[idx];
- if (off) {
- if (offset == off && buf->page == page) {
- /* merge with the last one */
- buf->len += bytes;
- i->iov_offset += bytes;
- goto out;
+/*
+ * fault_in_iov_iter_readable - fault in iov iterator for reading
+ * @i: iterator
+ * @size: maximum length
+ *
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * @size. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ *
+ * Always returns 0 for non-userspace iterators.
+ */
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
+{
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_readable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
+ size_t count = min(size, iov_iter_count(i));
+ const struct iovec *p;
+ size_t skip;
+
+ size -= count;
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
+ size_t len = min(count, p->iov_len - skip);
+ size_t ret;
+
+ if (unlikely(!len))
+ continue;
+ ret = fault_in_readable(p->iov_base + skip, len);
+ count -= len - ret;
+ if (ret)
+ break;
}
- idx = next_idx(idx, pipe);
- buf = &pipe->bufs[idx];
+ return count + size;
}
- if (idx == pipe->curbuf && pipe->nrbufs)
- return 0;
- pipe->nrbufs++;
- buf->ops = &page_cache_pipe_buf_ops;
- get_page(buf->page = page);
- buf->offset = offset;
- buf->len = bytes;
- i->iov_offset = offset + bytes;
- i->idx = idx;
-out:
- i->count -= bytes;
- return bytes;
+ return 0;
}
+EXPORT_SYMBOL(fault_in_iov_iter_readable);
/*
- * Fault in one or more iovecs of the given iov_iter, to a maximum length of
- * bytes. For each iovec, fault in each page that constitutes the iovec.
+ * fault_in_iov_iter_writeable - fault in iov iterator for writing
+ * @i: iterator
+ * @size: maximum length
*
- * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
- * because it is an invalid address).
+ * Faults in the iterator using get_user_pages(), i.e., without triggering
+ * hardware page faults. This is primarily useful when we already know that
+ * some or all of the pages in @i aren't in memory.
+ *
+ * Returns the number of bytes not faulted in, like copy_to_user() and
+ * copy_from_user().
+ *
+ * Always returns 0 for non-user-space iterators.
*/
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
- size_t skip = i->iov_offset;
- const struct iovec *iov;
- int err;
- struct iovec v;
-
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- iterate_iovec(i, bytes, v, iov, skip, ({
- err = fault_in_pages_readable(v.iov_base, v.iov_len);
- if (unlikely(err))
- return err;
- 0;}))
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
+ size_t count = min(size, iov_iter_count(i));
+ const struct iovec *p;
+ size_t skip;
+
+ size -= count;
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
+ size_t len = min(count, p->iov_len - skip);
+ size_t ret;
+
+ if (unlikely(!len))
+ continue;
+ ret = fault_in_safe_writeable(p->iov_base + skip, len);
+ count -= len - ret;
+ if (ret)
+ break;
+ }
+ return count + size;
}
return 0;
}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
+EXPORT_SYMBOL(fault_in_iov_iter_writeable);
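
The return convention of the two fault_in helpers above ("bytes not faulted
in") is easiest to see from a typical caller. A simplified, hypothetical
fragment in the style of a buffered-write path (not code from this patch;
'i' and 'bytes' stand for the caller's iterator and copy length):

	/*
	 * Before attempting a copy that must not take page faults, try to
	 * fault the user buffer in. A return value equal to 'bytes' means
	 * not a single byte could be made readable, so the range is bad.
	 */
	if (fault_in_iov_iter_readable(i, bytes) == bytes)
		return -EFAULT;
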
void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- direction &= READ | WRITE;
-
- /* It will get better. Eventually... */
- if (uaccess_kernel()) {
- i->type = ITER_KVEC | direction;
- i->kvec = (struct kvec *)iov;
- } else {
- i->type = ITER_IOVEC | direction;
- i->iov = iov;
- }
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_IOVEC,
+ .nofault = false,
+ .data_source = direction,
+ .__iov = iov,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_init);
-static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
-{
- char *from = kmap_atomic(page);
- memcpy(to, from + offset, len);
- kunmap_atomic(from);
-}
-
-static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
-{
- char *to = kmap_atomic(page);
- memcpy(to + offset, from, len);
- kunmap_atomic(to);
-}
-
-static void memzero_page(struct page *page, size_t offset, size_t len)
-{
- char *addr = kmap_atomic(page);
- memset(addr + offset, 0, len);
- kunmap_atomic(addr);
-}
-
-static inline bool allocated(struct pipe_buffer *buf)
-{
- return buf->ops == &default_pipe_buf_ops;
-}
-
-static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
-{
- size_t off = i->iov_offset;
- int idx = i->idx;
- if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
- idx = next_idx(idx, i->pipe);
- off = 0;
- }
- *idxp = idx;
- *offp = off;
-}
-
-static size_t push_pipe(struct iov_iter *i, size_t size,
- int *idxp, size_t *offp)
-{
- struct pipe_inode_info *pipe = i->pipe;
- size_t off;
- int idx;
- ssize_t left;
-
- if (unlikely(size > i->count))
- size = i->count;
- if (unlikely(!size))
- return 0;
-
- left = size;
- data_start(i, &idx, &off);
- *idxp = idx;
- *offp = off;
- if (off) {
- left -= PAGE_SIZE - off;
- if (left <= 0) {
- pipe->bufs[idx].len += size;
- return size;
- }
- pipe->bufs[idx].len = PAGE_SIZE;
- idx = next_idx(idx, pipe);
- }
- while (idx != pipe->curbuf || !pipe->nrbufs) {
- struct page *page = alloc_page(GFP_USER);
- if (!page)
- break;
- pipe->nrbufs++;
- pipe->bufs[idx].ops = &default_pipe_buf_ops;
- pipe->bufs[idx].page = page;
- pipe->bufs[idx].offset = 0;
- if (left <= PAGE_SIZE) {
- pipe->bufs[idx].len = left;
- return size;
- }
- pipe->bufs[idx].len = PAGE_SIZE;
- left -= PAGE_SIZE;
- idx = next_idx(idx, pipe);
- }
- return size - left;
-}
-
-static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off;
- int idx;
-
- if (!sanity(i))
- return 0;
-
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
- i->idx = idx;
- i->iov_offset = off + chunk;
- n -= chunk;
- addr += chunk;
- }
- i->count -= bytes;
- return bytes;
-}
-
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
- __wsum sum, size_t off)
-{
- __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
- return csum_block_add(sum, next, off);
-}
-
-static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, r;
- size_t off = 0;
- __wsum sum = *csum;
- int idx;
-
- if (!sanity(i))
- return 0;
-
- bytes = n = push_pipe(i, bytes, &idx, &r);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), r = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
- char *p = kmap_atomic(pipe->bufs[idx].page);
- sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
- kunmap_atomic(p);
- i->idx = idx;
- i->iov_offset = r + chunk;
- n -= chunk;
- off += chunk;
- addr += chunk;
- }
- i->count -= bytes;
- *csum = sum;
- return bytes;
-}
-
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
- if (unlikely(iov_iter_is_pipe(i)))
- return copy_pipe_to_iter(addr, bytes, i);
- if (iter_is_iovec(i))
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static __always_inline
+size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- if (access_ok(to, n)) {
- kasan_check_read(from, n);
- n = copy_to_user_mcsafe((__force void *) to, from, n);
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = copy_mc_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
+static __always_inline
+size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- unsigned long ret;
- char *to;
-
- to = kmap_atomic(page);
- ret = memcpy_mcsafe(to + offset, from, len);
- kunmap_atomic(to);
-
- return ret;
-}
-
-static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off, xfer = 0;
- int idx;
-
- if (!sanity(i))
- return 0;
-
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- unsigned long rem;
-
- rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
- chunk);
- i->idx = idx;
- i->iov_offset = off + chunk - rem;
- xfer += chunk - rem;
- if (rem)
- break;
- n -= chunk;
- addr += chunk;
- }
- i->count -= xfer;
- return xfer;
+ return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
- * @iter: destination iterator
+ * @i: destination iterator
*
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_to_iter() for protecting read/write to persistent memory.
- * Unless / until an architecture can guarantee identical performance
- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
- * performance regression to switch more users to the mcsafe version.
+ * The pmem driver deploys this for the dax operation
+ * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
+ * block layer). Upon #MC, read(2) aborts and returns EIO or the number
+ * of bytes successfully copied.
*
- * Otherwise, the main differences between this and typical _copy_to_iter().
+ * The main differences between this and typical _copy_to_iter() are:
*
* * Typical tail/residue handling after a fault retries the copy
* byte-by-byte until the fault happens again. Re-triggering machine
@@ -692,120 +226,80 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
* alignment and poison alignment assumptions to avoid re-triggering
* hardware exceptions.
*
- * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
- * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
- * a short copy.
+ * * ITER_KVEC and ITER_BVEC can return short copies. Compare to
+ * copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
*
- * See MCSAFE_TEST for self-test.
+ * Return: number of bytes copied (may be %0)
*/
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
- unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
-
- if (unlikely(iov_iter_is_pipe(i)))
- return copy_pipe_to_iter_mcsafe(addr, bytes, i);
- if (iter_is_iovec(i))
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- ({
- rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- })
- )
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter_mc, memcpy_to_iter_mc);
+}
+EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
+#endif /* CONFIG_ARCH_HAS_COPY_MC */
- return bytes;
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}
-EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
-#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- if (iter_is_iovec(i))
- might_fault();
- iterate_and_advance(i, bytes, v,
- copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- return bytes;
+ if (user_backed_iter(i))
+ might_fault();
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+static __always_inline
+size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
-
- if (iter_is_iovec(i))
- might_fault();
- iterate_all_kinds(i, bytes, v, ({
- if (copyin((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
+ return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
}
-EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- iterate_and_advance(i, bytes, v,
- __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_nocache,
+ memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+static __always_inline
+size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_flushcache(to + progress, iter_from, len);
+}
+
+static __always_inline
+size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy_flushcache(to + progress, iter_from, len);
+ return 0;
+}
+
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
* @bytes: total transfer length
- * @iter: source iterator
+ * @i: source iterator
*
* The pmem driver arranges for filesystem-dax to use this facility via
* dax_copy_from_iter() for ensuring that writes to persistent memory
@@ -814,52 +308,21 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
* all iterator types. The _copy_from_iter_nocache() only attempts to
* bypass the cache for the ITER_IOVEC case, and on some archs may use
* instructions that strand dirty-data in the cache.
+ *
+ * Return: number of bytes copied (may be %0)
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- iterate_and_advance(i, bytes, v,
- __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len)
- )
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_flushcache,
+ memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full_nocache);
-
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
@@ -878,169 +341,207 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
head = compound_head(page);
v += (page - head) << PAGE_SHIFT;
- if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
- return true;
- WARN_ON(1);
- return false;
+ if (WARN_ON(n > v || v > page_size(head)))
+ return false;
+ return true;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (unlikely(!page_copy_sane(page, offset, bytes)))
+ size_t res = 0;
+ if (!page_copy_sane(page, offset, bytes))
return 0;
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else if (unlikely(iov_iter_is_discard(i)))
- return bytes;
- else if (likely(!iov_iter_is_pipe(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- else
- return copy_page_to_iter_pipe(page, offset, bytes, i);
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_to_iter(kaddr + offset, n, i);
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
+ struct iov_iter *i)
{
- if (unlikely(!page_copy_sane(page, offset, bytes)))
+ size_t res = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
return 0;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(i->data_source))
return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+
+ n = iterate_and_advance(i, n, kaddr + offset,
+ copy_to_user_iter_nofault,
+ memcpy_to_iter);
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
}
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else
- return copy_page_from_iter_iovec(page, offset, bytes, i);
+ return res;
}
-EXPORT_SYMBOL(copy_page_from_iter);
+EXPORT_SYMBOL(copy_page_to_iter_nofault);
-static size_t pipe_zero(size_t bytes, struct iov_iter *i)
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
{
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off;
- int idx;
-
- if (!sanity(i))
+ size_t res = 0;
+ if (!page_copy_sane(page, offset, bytes))
return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_from_iter(kaddr + offset, n, i);
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(copy_page_from_iter);
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
+static __always_inline
+size_t zero_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ return clear_user(iter_to, len);
+}
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[idx].page, off, chunk);
- i->idx = idx;
- i->iov_offset = off + chunk;
- n -= chunk;
- }
- i->count -= bytes;
- return bytes;
+static __always_inline
+size_t zero_to_iter(void *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ memset(iter_to, 0, len);
+ return 0;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_zero(bytes, i);
- iterate_and_advance(i, bytes, v,
- clear_user(v.iov_base, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, NULL,
+ zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
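
/*
 * Usage sketch (illustrative only, assumes <linux/uio.h>): a read path that
 * crosses a hole typically zero-fills the rest of the request;
 * iov_iter_zero() advances @to exactly as a real copy would.  demo_* is
 * hypothetical.
 */
static size_t demo_read_hole(struct iov_iter *to, size_t hole_bytes)
{
        return iov_iter_zero(min(hole_bytes, iov_iter_count(to)), to);
}
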
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
{
- char *kaddr = kmap_atomic(page), *p = kaddr + offset;
- if (unlikely(!page_copy_sane(page, offset, bytes))) {
- kunmap_atomic(kaddr);
+ size_t n, copied = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
return 0;
- }
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- kunmap_atomic(kaddr);
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- iterate_all_kinds(i, bytes, v,
- copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- kunmap_atomic(kaddr);
- return bytes;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-static inline void pipe_truncate(struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- if (pipe->nrbufs) {
- size_t off = i->iov_offset;
- int idx = i->idx;
- int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
- if (off) {
- pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
- idx = next_idx(idx, pipe);
- nrbufs++;
- }
- while (pipe->nrbufs > nrbufs) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
+
+ do {
+ char *p;
+
+ n = bytes - copied;
+ if (PageHighMem(page)) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ n = min_t(size_t, n, PAGE_SIZE - offset);
}
- }
+
+ p = kmap_atomic(page) + offset;
+ n = __copy_from_iter(p, n, i);
+ kunmap_atomic(p);
+ copied += n;
+ offset += n;
+ } while (PageHighMem(page) && copied != bytes && n > 0);
+
+ return copied;
}
+EXPORT_SYMBOL(copy_page_from_iter_atomic);
-static void pipe_advance(struct iov_iter *i, size_t size)
+static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
- struct pipe_inode_info *pipe = i->pipe;
- if (unlikely(i->count < size))
- size = i->count;
- if (size) {
- struct pipe_buffer *buf;
- size_t off = i->iov_offset, left = size;
- int idx = i->idx;
- if (off) /* make it relative to the beginning of buffer */
- left += off - pipe->bufs[idx].offset;
- while (1) {
- buf = &pipe->bufs[idx];
- if (left <= buf->len)
- break;
- left -= buf->len;
- idx = next_idx(idx, pipe);
- }
- i->idx = idx;
- i->iov_offset = buf->offset + left;
- }
+ const struct bio_vec *bvec, *end;
+
+ if (!i->count)
+ return;
i->count -= size;
- /* ... and discard everything past that point */
- pipe_truncate(i);
+
+ size += i->iov_offset;
+
+ for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
+ if (likely(size < bvec->bv_len))
+ break;
+ size -= bvec->bv_len;
+ }
+ i->iov_offset = size;
+ i->nr_segs -= bvec - i->bvec;
+ i->bvec = bvec;
}
-void iov_iter_advance(struct iov_iter *i, size_t size)
+static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- pipe_advance(i, size);
+ const struct iovec *iov, *end;
+
+ if (!i->count)
return;
+ i->count -= size;
+
+ size += i->iov_offset; // from beginning of current segment
+ for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
+ if (likely(size < iov->iov_len))
+ break;
+ size -= iov->iov_len;
}
- if (unlikely(iov_iter_is_discard(i))) {
+ i->iov_offset = size;
+ i->nr_segs -= iov - iter_iov(i);
+ i->__iov = iov;
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+ i->iov_offset += size;
+ i->count -= size;
+ } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ /* iovec and kvec have identical layouts */
+ iov_iter_iovec_advance(i, size);
+ } else if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_discard(i)) {
i->count -= size;
- return;
}
- iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1051,30 +552,6 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
- if (unlikely(iov_iter_is_pipe(i))) {
- struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
- size_t off = i->iov_offset;
- while (1) {
- size_t n = off - pipe->bufs[idx].offset;
- if (unroll < n) {
- off -= unroll;
- break;
- }
- unroll -= n;
- if (!unroll && idx == i->start_idx) {
- off = 0;
- break;
- }
- if (!idx--)
- idx = pipe->buffers - 1;
- off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
- }
- i->iov_offset = off;
- i->idx = idx;
- pipe_truncate(i);
- return;
- }
if (unlikely(iov_iter_is_discard(i)))
return;
if (unroll <= i->iov_offset) {
@@ -1082,7 +559,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_bvec(i)) {
+ if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
+ BUG(); /* We should never go beyond the start of the specified
+ * range since we might then be straying into pages that
+ * aren't pinned.
+ */
+ } else if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -1095,12 +577,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
unroll -= n;
}
} else { /* same logics for iovec and kvec */
- const struct iovec *iov = i->iov;
+ const struct iovec *iov = iter_iov(i);
while (1) {
size_t n = (--iov)->iov_len;
i->nr_segs++;
if (unroll <= n) {
- i->iov = iov;
+ i->__iov = iov;
i->iov_offset = n - unroll;
return;
}
@@ -1115,16 +597,13 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i)))
- return i->count; // it is a silly place, anyway
- if (i->nr_segs == 1)
- return i->count;
- if (unlikely(iov_iter_is_discard(i)))
- return i->count;
- else if (iov_iter_is_bvec(i))
- return min(i->count, i->bvec->bv_len - i->iov_offset);
- else
- return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (i->nr_segs > 1) {
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
+ if (iov_iter_is_bvec(i))
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ }
+ return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
@@ -1133,11 +612,14 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_KVEC | (direction & (READ | WRITE));
- i->kvec = kvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_KVEC,
+ .data_source = direction,
+ .kvec = kvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_kvec);
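
/*
 * Usage sketch (illustrative only): wrapping a kernel buffer in an ITER_KVEC
 * iterator so that generic helpers such as copy_page_to_iter() can fill it.
 * ITER_DEST (== READ) marks the iterator as the destination of the copy;
 * demo_* is hypothetical.
 */
static size_t demo_page_to_kernel_buf(struct page *page, size_t offset,
                                      void *buf, size_t len)
{
        struct kvec kv = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
        return copy_page_to_iter(page, offset, len, &iter);
}
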
@@ -1146,28 +628,44 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_BVEC | (direction & (READ | WRITE));
- i->bvec = bvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_BVEC,
+ .data_source = direction,
+ .bvec = bvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_bvec);
-void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
- struct pipe_inode_info *pipe,
- size_t count)
+/**
+ * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @xarray: The xarray to access.
+ * @start: The start file position.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator to either draw data out of the pages attached to an
+ * inode or to inject data into those pages. The pages *must* be prevented
+ * from evaporation, either by taking a ref on them or locking them by the
+ * caller.
+ */
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
+ struct xarray *xarray, loff_t start, size_t count)
{
- BUG_ON(direction != READ);
- WARN_ON(pipe->nrbufs == pipe->buffers);
- i->type = ITER_PIPE | READ;
- i->pipe = pipe;
- i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- i->iov_offset = 0;
- i->count = count;
- i->start_idx = i->idx;
-}
-EXPORT_SYMBOL(iov_iter_pipe);
+ BUG_ON(direction & ~1);
+ *i = (struct iov_iter) {
+ .iter_type = ITER_XARRAY,
+ .data_source = direction,
+ .xarray = xarray,
+ .xarray_start = start,
+ .count = count,
+ .iov_offset = 0
+ };
+}
+EXPORT_SYMBOL(iov_iter_xarray);
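
/*
 * Usage sketch (illustrative only): reading @len bytes at @pos out of an
 * inode's page cache through an ITER_XARRAY iterator.  ITER_SOURCE marks the
 * cached pages as the data source, so copy_from_iter() moves bytes into @buf.
 * As required above, the caller must already hold refs on (or have locked)
 * the pages; demo_* is hypothetical.
 */
static size_t demo_read_from_pagecache(struct address_space *mapping,
                                       loff_t pos, void *buf, size_t len)
{
        struct iov_iter iter;

        iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, pos, len);
        return copy_from_iter(buf, len, &iter);
}
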
/**
* iov_iter_discard - Initialise an I/O iterator that discards data
@@ -1181,449 +679,652 @@ EXPORT_SYMBOL(iov_iter_pipe);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
- i->type = ITER_DISCARD | READ;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter){
+ .iter_type = ITER_DISCARD,
+ .data_source = false,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_discard);
-unsigned long iov_iter_alignment(const struct iov_iter *i)
+static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
{
- unsigned long res = 0;
size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
- if (unlikely(iov_iter_is_pipe(i))) {
- if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
- return size | i->iov_offset;
- return size;
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(iov->iov_base + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
}
- iterate_all_kinds(i, size, v,
- (res |= (unsigned long)v.iov_base | v.iov_len, 0),
- res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len
- )
- return res;
+ return true;
}
-EXPORT_SYMBOL(iov_iter_alignment);
-unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
{
- unsigned long res = 0;
size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return ~0U;
- }
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
- iterate_all_kinds(i, size, v,
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0), 0),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0))
- );
- return res;
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
}
-EXPORT_SYMBOL(iov_iter_gap_alignment);
-static inline ssize_t __pipe_get_pages(struct iov_iter *i,
- size_t maxsize,
- struct page **pages,
- int idx,
- size_t *start)
+/**
+ * iov_iter_is_aligned() - Check if the addresses and lengths of each segments
+ * are aligned to the parameters.
+ *
+ * @i: &struct iov_iter to check
+ * @addr_mask: bit mask to check against the iov element's addresses
+ * @len_mask: bit mask to check against the iov element's lengths
+ *
+ * Return: false if any addresses or lengths intersect with the provided masks
+ */
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
{
- struct pipe_inode_info *pipe = i->pipe;
- ssize_t n = push_pipe(i, maxsize, &idx, start);
- if (!n)
- return -EFAULT;
+ if (likely(iter_is_ubuf(i))) {
+ if (i->count & len_mask)
+ return false;
+ if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
+ return false;
+ return true;
+ }
- maxsize = n;
- n += *start;
- while (n > 0) {
- get_page(*pages++ = pipe->bufs[idx].page);
- idx = next_idx(idx, pipe);
- n -= PAGE_SIZE;
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_aligned_iovec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_aligned_bvec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_xarray(i)) {
+ if (i->count & len_mask)
+ return false;
+ if ((i->xarray_start + i->iov_offset) & addr_mask)
+ return false;
}
- return maxsize;
+ return true;
+}
+EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
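
/*
 * Usage sketch (illustrative only): a typical caller rejects a direct-I/O
 * style request unless every segment's address and length are block aligned.
 * The masks are "bits that must be clear"; 511 below stands in for
 * bdev_logical_block_size() - 1 in a real block driver.
 */
static bool demo_dio_aligned(const struct iov_iter *iter)
{
        return iov_iter_is_aligned(iter, 511, 511);
}
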
+
+static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
+ if (len) {
+ res |= (unsigned long)iov->iov_base + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ }
+ return res;
}
-static ssize_t pipe_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
+static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
- unsigned npages;
- size_t capacity;
- int idx;
+ unsigned res = 0;
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+ res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ return res;
+}
- if (!maxsize)
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ if (likely(iter_is_ubuf(i))) {
+ size_t size = i->count;
+ if (size)
+ return ((unsigned long)i->ubuf + i->iov_offset) | size;
return 0;
+ }
- if (!sanity(i))
- return -EFAULT;
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_alignment_iovec(i);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_alignment_bvec(i);
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
- capacity = min(npages,maxpages) * PAGE_SIZE - *start;
+ if (iov_iter_is_xarray(i))
+ return (i->xarray_start + i->iov_offset) | i->count;
- return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
+ return 0;
}
+EXPORT_SYMBOL(iov_iter_alignment);
-ssize_t iov_iter_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
- if (maxsize > i->count)
- maxsize = i->count;
+ unsigned long res = 0;
+ unsigned long v = 0;
+ size_t size = i->count;
+ unsigned k;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
+ if (iter_is_ubuf(i))
+ return 0;
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (WARN_ON(!iter_is_iovec(i)))
+ return ~0U;
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- addr &= ~(PAGE_SIZE - 1);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- res = get_user_pages_fast(addr, n,
- iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
- pages);
- if (unlikely(res < 0))
- return res;
- return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- get_page(*pages = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- })
- )
- return 0;
+ for (k = 0; k < i->nr_segs; k++) {
+ const struct iovec *iov = iter_iov(i) + k;
+ if (iov->iov_len) {
+ unsigned long base = (unsigned long)iov->iov_base;
+ if (v) // if not the first one
+ res |= base | v; // this start | previous end
+ v = base + iov->iov_len;
+ if (size <= iov->iov_len)
+ break;
+ size -= iov->iov_len;
+ }
+ }
+ return res;
}
-EXPORT_SYMBOL(iov_iter_get_pages);
+EXPORT_SYMBOL(iov_iter_gap_alignment);
-static struct page **get_pages_array(size_t n)
+static int want_pages_array(struct page ***res, size_t size,
+ size_t start, unsigned int maxpages)
{
- return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
+ unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
+
+ if (count > maxpages)
+ count = maxpages;
+ WARN_ON(!count); // caller should've prevented that
+ if (!*res) {
+ *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!*res)
+ return 0;
+ }
+ return count;
}
-static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
- size_t *start)
+static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
+ pgoff_t index, unsigned int nr_pages)
{
- struct page **p;
- ssize_t n;
- int idx;
- int npages;
+ XA_STATE(xas, xa, index);
+ struct page *page;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
- if (!maxsize)
- return 0;
+ pages[ret] = find_subpage(page, xas.xa_index);
+ get_page(pages[ret]);
+ if (++ret == nr_pages)
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
- if (!sanity(i))
- return -EFAULT;
+static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned maxpages, size_t *_start_offset)
+{
+ unsigned nr, offset, count;
+ pgoff_t index;
+ loff_t pos;
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
- n = npages * PAGE_SIZE - *start;
- if (maxsize > n)
- maxsize = n;
- else
- npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- p = get_pages_array(npages);
- if (!p)
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!count)
return -ENOMEM;
- n = __pipe_get_pages(i, maxsize, p, idx, start);
- if (n > 0)
- *pages = p;
- else
- kvfree(p);
- return n;
+ nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ i->iov_offset += maxsize;
+ i->count -= maxsize;
+ return maxsize;
}
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
+static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
+{
+ size_t skip;
+ long k;
+
+ if (iter_is_ubuf(i))
+ return (unsigned long)i->ubuf + i->iov_offset;
+
+ for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
+
+ if (unlikely(!len))
+ continue;
+ if (*size > len)
+ *size = len;
+ return (unsigned long)iov->iov_base + skip;
+ }
+ BUG(); // if it had been empty, we wouldn't get called
+}
+
+/* must be done on non-empty ITER_BVEC one */
+static struct page *first_bvec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start)
+{
+ struct page *page;
+ size_t skip = i->iov_offset, len;
+
+ len = i->bvec->bv_len - skip;
+ if (*size > len)
+ *size = len;
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ *start = skip % PAGE_SIZE;
+ return page;
+}
+
+static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
- size_t *start)
+ unsigned int maxpages, size_t *start)
{
- struct page **p;
+ unsigned int n, gup_flags = 0;
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
+ if (likely(user_backed_iter(i))) {
+ unsigned long addr;
int res;
- addr &= ~(PAGE_SIZE - 1);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- p = get_pages_array(n);
- if (!p)
+ if (iov_iter_rw(i) != WRITE)
+ gup_flags |= FOLL_WRITE;
+ if (i->nofault)
+ gup_flags |= FOLL_NOFAULT;
+
+ addr = first_iovec_segment(i, &maxsize);
+ *start = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
return -ENOMEM;
- res = get_user_pages_fast(addr, n,
- iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
- if (unlikely(res < 0)) {
- kvfree(p);
+ res = get_user_pages_fast(addr, n, gup_flags, *pages);
+ if (unlikely(res <= 0))
return res;
- }
- *pages = p;
- return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- *pages = p = get_pages_array(1);
- if (!p)
+ maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page **p;
+ struct page *page;
+
+ page = first_bvec_segment(i, &maxsize, start);
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
return -ENOMEM;
- get_page(*p = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- })
- )
- return 0;
+ p = *pages;
+ for (int k = 0; k < n; k++)
+ get_page(p[k] = page + k);
+ maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
+ i->count -= maxsize;
+ i->iov_offset += maxsize;
+ if (i->iov_offset == i->bvec->bv_len) {
+ i->iov_offset = 0;
+ i->bvec++;
+ i->nr_segs--;
+ }
+ return maxsize;
+ }
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
+ return -EFAULT;
}
-EXPORT_SYMBOL(iov_iter_get_pages_alloc);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
+ size_t maxsize, unsigned maxpages, size_t *start)
{
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (!maxpages)
return 0;
- }
- iterate_and_advance(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (!err) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- err ? v.iov_len : 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (err)
- return false;
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- iov_iter_advance(i, bytes);
- return true;
+ BUG_ON(!pages);
+
+ return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
-EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+EXPORT_SYMBOL(iov_iter_get_pages2);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
- struct iov_iter *i)
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
+ struct page ***pages, size_t maxsize, size_t *start)
{
- const char *from = addr;
- __wsum *csum = csump;
- __wsum sum, next;
- size_t off = 0;
+ ssize_t len;
- if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+ *pages = NULL;
- sum = *csum;
- if (unlikely(iov_iter_is_discard(i))) {
- WARN_ON(1); /* for now */
- return 0;
+ len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
+ if (len <= 0) {
+ kvfree(*pages);
+ *pages = NULL;
}
- iterate_and_advance(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len, 0, &err);
- if (!err) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- err ? v.iov_len : 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy(v.iov_base,
- (from += v.iov_len) - v.iov_len,
- v.iov_len, sum, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i)
-{
-#ifdef CONFIG_CRYPTO
- struct ahash_request *hash = hashp;
- struct scatterlist sg;
- size_t copied;
-
- copied = copy_to_iter(addr, bytes, i);
- sg_init_one(&sg, addr, copied);
- ahash_request_set_crypt(hash, &sg, NULL, copied);
- crypto_ahash_update(hash);
- return copied;
-#else
- return 0;
-#endif
+ return len;
}
-EXPORT_SYMBOL(hash_and_copy_to_iter);
+EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
+static int iov_npages(const struct iov_iter *i, int maxpages)
{
- size_t size = i->count;
+ size_t skip = i->iov_offset, size = i->count;
+ const struct iovec *p;
int npages = 0;
- if (!size)
- return 0;
- if (unlikely(iov_iter_is_discard(i)))
- return 0;
+ for (p = iter_iov(i); size; skip = 0, p++) {
+ unsigned offs = offset_in_page(p->iov_base + skip);
+ size_t len = min(p->iov_len - skip, size);
- if (unlikely(iov_iter_is_pipe(i))) {
- struct pipe_inode_info *pipe = i->pipe;
- size_t off;
- int idx;
+ if (len) {
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ }
+ return npages;
+}
- if (!sanity(i))
- return 0;
+static int bvec_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct bio_vec *p;
+ int npages = 0;
- data_start(i, &idx, &off);
- /* some of this one + all after this one */
- npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
- if (npages >= maxpages)
- return maxpages;
- } else iterate_all_kinds(i, size, v, ({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- 0;}),({
- npages++;
- if (npages >= maxpages)
- return maxpages;
- }),({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
+ for (p = i->bvec; size; skip = 0, p++) {
+ unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
+ size_t len = min(p->bv_len - skip, size);
+
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
return maxpages;
- })
- )
+ }
return npages;
}
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ if (unlikely(!i->count))
+ return 0;
+ if (likely(iter_is_ubuf(i))) {
+ unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
+ int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_npages(i, maxpages);
+ if (iov_iter_is_bvec(i))
+ return bvec_npages(i, maxpages);
+ if (iov_iter_is_xarray(i)) {
+ unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ return 0;
+}
EXPORT_SYMBOL(iov_iter_npages);
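
/*
 * Usage sketch (illustrative only): iov_iter_npages() is commonly used to
 * size a page or bio_vec array before pinning pages; the result is already
 * clamped to the cap passed in.  BIO_MAX_VECS is just an example bound.
 */
#include <linux/bio.h>

static int demo_segment_estimate(const struct iov_iter *iter)
{
        return iov_iter_npages(iter, BIO_MAX_VECS);
}
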
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
- if (unlikely(iov_iter_is_pipe(new))) {
- WARN_ON(1);
- return NULL;
- }
- if (unlikely(iov_iter_is_discard(new)))
- return NULL;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
- else
+ else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
/* iovec and kvec have identical layout */
- return new->iov = kmemdup(new->iov,
+ return new->__iov = kmemdup(new->__iov,
new->nr_segs * sizeof(struct iovec),
flags);
+ return NULL;
}
EXPORT_SYMBOL(dup_iter);
+static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uvec, unsigned long nr_segs)
+{
+ const struct compat_iovec __user *uiov =
+ (const struct compat_iovec __user *)uvec;
+ int ret = -EFAULT, i;
+
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
+ return -EFAULT;
+
+ for (i = 0; i < nr_segs; i++) {
+ compat_uptr_t buf;
+ compat_ssize_t len;
+
+ unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
+
+ /* check for compat_size_t not fitting in compat_ssize_t .. */
+ if (len < 0) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov[i].iov_base = compat_ptr(buf);
+ iov[i].iov_len = len;
+ }
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
+}
+
+static __noclone int copy_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uiov, unsigned long nr_segs)
+{
+ int ret = -EFAULT;
+
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
+ return -EFAULT;
+
+ do {
+ void __user *buf;
+ ssize_t len;
+
+ unsafe_get_user(len, &uiov->iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
+
+ /* check for size_t not fitting in ssize_t .. */
+ if (unlikely(len < 0)) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov->iov_base = buf;
+ iov->iov_len = len;
+
+ uiov++; iov++;
+ } while (--nr_segs);
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
+}
+
+struct iovec *iovec_from_user(const struct iovec __user *uvec,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat)
+{
+ struct iovec *iov = fast_iov;
+ int ret;
+
+ /*
+ * SuS says "The readv() function *may* fail if the iovcnt argument was
+	 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
+ * traditionally returned zero for zero segments, so...
+ */
+ if (nr_segs == 0)
+ return iov;
+ if (nr_segs > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+ if (nr_segs > fast_segs) {
+ iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
+ if (!iov)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(compat))
+ ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
+ else
+ ret = copy_iovec_from_user(iov, uvec, nr_segs);
+ if (ret) {
+ if (iov != fast_iov)
+ kfree(iov);
+ return ERR_PTR(ret);
+ }
+
+ return iov;
+}
+
+/*
+ * Single segment iovec supplied by the user, import it as ITER_UBUF.
+ */
+static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ struct iovec **iovp, struct iov_iter *i,
+ bool compat)
+{
+ struct iovec *iov = *iovp;
+ ssize_t ret;
+
+ if (compat)
+ ret = copy_compat_iovec_from_user(iov, uvec, 1);
+ else
+ ret = copy_iovec_from_user(iov, uvec, 1);
+ if (unlikely(ret))
+ return ret;
+
+ ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+ if (unlikely(ret))
+ return ret;
+ *iovp = NULL;
+ return i->count;
+}
+
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat)
+{
+ ssize_t total_len = 0;
+ unsigned long seg;
+ struct iovec *iov;
+
+ if (nr_segs == 1)
+ return __import_iovec_ubuf(type, uvec, iovp, i, compat);
+
+ iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
+ if (IS_ERR(iov)) {
+ *iovp = NULL;
+ return PTR_ERR(iov);
+ }
+
+ /*
+ * According to the Single Unix Specification we should return EINVAL if
+ * an element length is < 0 when cast to ssize_t or if the total length
+ * would overflow the ssize_t return value of the system call.
+ *
+ * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
+ * overflow case.
+ */
+ for (seg = 0; seg < nr_segs; seg++) {
+ ssize_t len = (ssize_t)iov[seg].iov_len;
+
+ if (!access_ok(iov[seg].iov_base, len)) {
+ if (iov != *iovp)
+ kfree(iov);
+ *iovp = NULL;
+ return -EFAULT;
+ }
+
+ if (len > MAX_RW_COUNT - total_len) {
+ len = MAX_RW_COUNT - total_len;
+ iov[seg].iov_len = len;
+ }
+ total_len += len;
+ }
+
+ iov_iter_init(i, type, iov, nr_segs, total_len);
+ if (iov == *iovp)
+ *iovp = NULL;
+ else
+ *iovp = iov;
+ return total_len;
+}
+
/**
* import_iovec() - Copy an array of &struct iovec from userspace
* into the kernel, check that it is valid, and initialize a new
* &struct iov_iter iterator to access it.
*
* @type: One of %READ or %WRITE.
- * @uvector: Pointer to the userspace array.
+ * @uvec: Pointer to the userspace array.
* @nr_segs: Number of elements in userspace array.
* @fast_segs: Number of elements in @iov.
- * @iov: (input and output parameter) Pointer to pointer to (usually small
+ * @iovp: (input and output parameter) Pointer to pointer to (usually small
* on-stack) kernel array.
* @i: Pointer to iterator that will be initialized on success.
*
@@ -1634,84 +1335,327 @@ EXPORT_SYMBOL(dup_iter);
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
- * Return: 0 on success or negative error code on error.
+ * Return: Negative error code on error, bytes imported on success
*/
-int import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
-{
- ssize_t n;
- struct iovec *p;
- n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return 0;
+ struct iovec **iovp, struct iov_iter *i)
+{
+ return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
+ in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
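
/*
 * Usage sketch (illustrative only) of the calling convention documented
 * above: offer a small on-stack array, let import_iovec() decide whether to
 * allocate, and kfree() whatever comes back through *iovp (NULL when the
 * stack array was used).  ITER_DEST (== READ) because data flows to user
 * memory here; demo_* names are hypothetical.
 */
#include <linux/slab.h>
#include <linux/uio.h>

static ssize_t demo_copy_to_user_iovec(const struct iovec __user *uvec,
                                       unsigned int nr_segs,
                                       const void *src, size_t src_len)
{
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;

        ret = copy_to_iter(src, min_t(size_t, src_len, iov_iter_count(&iter)),
                           &iter);
        kfree(iov);             /* kfree(NULL) is a no-op */
        return ret;
}
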
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
-int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
-{
- ssize_t n;
- struct iovec *p;
- n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return 0;
-}
-#endif
-
-int import_single_range(int rw, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i)
+int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
if (unlikely(!access_ok(buf, len)))
return -EFAULT;
- iov->iov_base = buf;
- iov->iov_len = len;
- iov_iter_init(i, rw, iov, 1, len);
+ iov_iter_ubuf(i, rw, buf, len);
return 0;
}
-EXPORT_SYMBOL(import_single_range);
+EXPORT_SYMBOL_GPL(import_ubuf);
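
/*
 * Usage sketch (illustrative only): import_ubuf() is the single-buffer
 * counterpart of import_iovec() for read(2)/write(2)-style paths.
 * ITER_SOURCE (== WRITE) because the user buffer supplies the data here;
 * demo_* names are hypothetical.
 */
static ssize_t demo_copy_from_user_buf(void __user *ubuf, size_t len,
                                       void *dst, size_t dst_len)
{
        struct iov_iter iter;
        int ret;

        ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
        if (ret)
                return ret;
        return copy_from_iter(dst, min(dst_len, iov_iter_count(&iter)), &iter);
}
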
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context)
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
- struct kvec w;
- int err = -EINVAL;
- if (!bytes)
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ if (iter_is_ubuf(i))
+ return;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these, just one is enough and we can deduct
+ * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+	 * size, so we can just increment the iov pointer as they are unionized.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->__iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
+
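
/*
 * Usage sketch (illustrative only) of the save/restore pairing implemented
 * above: snapshot the iterator, let a consumer advance it, and rewind if the
 * whole operation must be retried.  demo_try_send() is a stand-in for any
 * such consumer, not a real API.
 */
static ssize_t demo_try_send(struct iov_iter *iter);    /* hypothetical */

static ssize_t demo_send_with_retry(struct iov_iter *iter)
{
        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(iter, &state);
        ret = demo_try_send(iter);              /* may partially advance */
        if (ret == -EAGAIN)
                iov_iter_restore(iter, &state); /* rewind for a later retry */
        return ret;
}
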
+/*
+ * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
+ * get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page *page, **p;
+ unsigned int nr = 0, offset;
+ loff_t pos = i->xarray_start + i->iov_offset;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ XA_STATE(xas, i->xarray, index);
+
+ offset = pos & ~PAGE_MASK;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ p[nr++] = find_subpage(page, xas.xa_index);
+ if (nr == maxpages)
+ break;
+ }
+ rcu_read_unlock();
+
+ maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+}
+
+/*
+ * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
+ * not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page **p, *page;
+ size_t skip = i->iov_offset, offset, size;
+ int k;
+
+ for (;;) {
+ if (i->nr_segs == 0)
+ return 0;
+ size = min(maxsize, i->bvec->bv_len - skip);
+ if (size)
+ break;
+ i->iov_offset = 0;
+ i->nr_segs--;
+ i->bvec++;
+ skip = 0;
+ }
+
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ offset = skip % PAGE_SIZE;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, size, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+ for (k = 0; k < maxpages; k++)
+ p[k] = page + k;
+
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
+}
+
+/*
+ * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
+ * This does not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page **p, *page;
+ const void *kaddr;
+ size_t skip = i->iov_offset, offset, len, size;
+ int k;
+
+ for (;;) {
+ if (i->nr_segs == 0)
+ return 0;
+ size = min(maxsize, i->kvec->iov_len - skip);
+ if (size)
+ break;
+ i->iov_offset = 0;
+ i->nr_segs--;
+ i->kvec++;
+ skip = 0;
+ }
+
+ kaddr = i->kvec->iov_base + skip;
+ offset = (unsigned long)kaddr & ~PAGE_MASK;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, size, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+
+ kaddr -= offset;
+ len = offset + size;
+ for (k = 0; k < maxpages; k++) {
+ size_t seg = min_t(size_t, len, PAGE_SIZE);
+
+ if (is_vmalloc_or_module_addr(kaddr))
+ page = vmalloc_to_page(kaddr);
+ else
+ page = virt_to_page(kaddr);
+
+ p[k] = page;
+ len -= seg;
+ kaddr += PAGE_SIZE;
+ }
+
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
+}
+
+/*
+ * Extract a list of contiguous pages from a user iterator and get a pin on
+ * each of them. This should only be used if the iterator is user-backed
+ * (ITER_IOVEC/ITER_UBUF).
+ *
+ * It does not get refs on the pages, but the pages must be unpinned by the
+ * caller once the transfer is complete.
+ *
+ * This is safe to be used where background IO/DMA *is* going to be modifying
+ * the buffer; using a pin rather than a ref forces fork() to give the
+ * child a copy of the page.
+ */
+static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
+ struct page ***pages,
+ size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ unsigned long addr;
+ unsigned int gup_flags = 0;
+ size_t offset;
+ int res;
+
+ if (i->data_source == ITER_DEST)
+ gup_flags |= FOLL_WRITE;
+ if (extraction_flags & ITER_ALLOW_P2PDMA)
+ gup_flags |= FOLL_PCI_P2PDMA;
+ if (i->nofault)
+ gup_flags |= FOLL_NOFAULT;
+
+ addr = first_iovec_segment(i, &maxsize);
+ *offset0 = offset = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
+ if (unlikely(res <= 0))
+ return res;
+ maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+}
+
+/**
+ * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
+ * @i: The iterator to extract from
+ * @pages: Where to return the list of pages
+ * @maxsize: The maximum amount of iterator to extract
+ * @maxpages: The maximum size of the list of pages
+ * @extraction_flags: Flags to qualify request
+ * @offset0: Where to return the starting offset into (*@pages)[0]
+ *
+ * Extract a list of contiguous pages from the current point of the iterator,
+ * advancing the iterator. The maximum number of pages and the maximum amount
+ * of page contents can be set.
+ *
+ * If *@pages is NULL, a page list will be allocated to the required size and
+ * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
+ * that the caller allocated a page list at least @maxpages in size and this
+ * will be filled in.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * The iov_iter_extract_will_pin() function can be used to query how cleanup
+ * should be performed.
+ *
+ * Extra refs or pins on the pages may be obtained as follows:
+ *
+ * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
+ * added to the pages, but refs will not be taken.
+ * iov_iter_extract_will_pin() will return true.
+ *
+ * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
+ * merely listed; no extra refs or pins are obtained.
+ * iov_iter_extract_will_pin() will return false.
+ *
+ * Note also:
+ *
+ * (*) Use with ITER_DISCARD is not supported as that has no content.
+ *
+ * On success, the function sets *@pages to the new pagelist, if allocated, and
+ * sets *offset0 to the offset into the first page.
+ *
+ * It may also return -ENOMEM and -EFAULT.
+ */
+ssize_t iov_iter_extract_pages(struct iov_iter *i,
+ struct page ***pages,
+ size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
+ if (!maxsize)
return 0;
- iterate_all_kinds(i, bytes, v, -EINVAL, ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;}), ({
- w = v;
- err = f(&w, context);})
- )
- return err;
-}
-EXPORT_SYMBOL(iov_iter_for_each_range);
+ if (likely(user_backed_iter(i)))
+ return iov_iter_extract_user_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_kvec(i))
+ return iov_iter_extract_kvec_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_bvec(i))
+ return iov_iter_extract_bvec_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_xarray(i))
+ return iov_iter_extract_xarray_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
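A minimal, hypothetical caller sketch (not part of this patch) illustrating the cleanup contract described in the kerneldoc above: pages extracted from a user-backed iterator must be unpinned once the transfer completes, whereas kvec/bvec/xarray pages need no cleanup. The function name and sizes are illustrative; the page array is assumed to have been allocated by iov_iter_extract_pages() itself and is therefore released with kvfree().

	static ssize_t demo_extract_and_release(struct iov_iter *iter)
	{
		struct page **pages = NULL;	/* let the helper allocate the array */
		size_t offset0;
		ssize_t len;
		int k;

		len = iov_iter_extract_pages(iter, &pages, 64 * 1024, 16, 0, &offset0);
		if (len <= 0)
			return len;

		/* ... hand pages[], starting at offset0, to the device ... */

		if (iov_iter_extract_will_pin(iter)) {
			/* user-backed iterator: drop the pins we were given */
			for (k = 0; k * PAGE_SIZE < offset0 + len; k++)
				unpin_user_page(pages[k]);
		}
		kvfree(pages);
		return len;
	}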
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2f17b488d58e..2d5329a42105 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -188,14 +188,18 @@ EXPORT_SYMBOL(irq_poll_init);
static int irq_poll_cpu_dead(unsigned int cpu)
{
/*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
+ * If a CPU goes away, splice its entries to the current CPU and
+ * set the POLL softirq bit. The local_bh_disable()/enable() pair
+ * ensures that it is handled. Otherwise the current CPU could
+ * reach idle with the POLL softirq pending.
*/
+ local_bh_disable();
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ local_bh_enable();
return 0;
}
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
new file mode 100644
index 000000000000..0a7f6ae62839
--- /dev/null
+++ b/lib/is_signed_type_kunit.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/compiler.h>
+
+enum unsigned_enum {
+ constant_a = 3,
+};
+
+enum signed_enum {
+ constant_b = -1,
+ constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+ KUNIT_CASE(is_signed_type_test),
+ {}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+ .name = "is_signed_type",
+ .test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
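For context, is_signed_type() is provided by include/linux/compiler.h; a simplified sketch of the idea the tests above exercise (the real macro may differ, for example in sparse annotations) is:

	/*
	 * Simplified illustration only, not the kernel's definition: a type is
	 * treated as signed iff -1 cast to it compares below 1 of the same type.
	 */
	#define demo_is_signed_type(type)	(((type)(-1)) < (type)1)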
diff --git a/lib/jedec_ddr_data.c b/lib/jedec_ddr_data.c
deleted file mode 100644
index d0b312e28d36..000000000000
--- a/lib/jedec_ddr_data.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * DDR addressing details and AC timing parameters from JEDEC specs
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Aneesh V <aneesh@ti.com>
- */
-
-#include <memory/jedec_ddr.h>
-#include <linux/module.h>
-
-/* LPDDR2 addressing details from JESD209-2 section 2.4 */
-const struct lpddr2_addressing
- lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES] = {
- {B4, T_REFI_15_6, T_RFC_90}, /* 64M */
- {B4, T_REFI_15_6, T_RFC_90}, /* 128M */
- {B4, T_REFI_7_8, T_RFC_90}, /* 256M */
- {B4, T_REFI_7_8, T_RFC_90}, /* 512M */
- {B8, T_REFI_7_8, T_RFC_130}, /* 1GS4 */
- {B8, T_REFI_3_9, T_RFC_130}, /* 2GS4 */
- {B8, T_REFI_3_9, T_RFC_130}, /* 4G */
- {B8, T_REFI_3_9, T_RFC_210}, /* 8G */
- {B4, T_REFI_7_8, T_RFC_130}, /* 1GS2 */
- {B4, T_REFI_3_9, T_RFC_130}, /* 2GS2 */
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_addressing_table);
-
-/* LPDDR2 AC timing parameters from JESD209-2 section 12 */
-const struct lpddr2_timings
- lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES] = {
- /* Speed bin 400(200 MHz) */
- [0] = {
- .max_freq = 200000000,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 10000,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 533(266 MHz) */
- [1] = {
- .max_freq = 266666666,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 800(400 MHz) */
- [2] = {
- .max_freq = 400000000,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 1066(533 MHz) */
- [3] = {
- .max_freq = 533333333,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 5620,
- },
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_timings);
-
-const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
- .tRPab = 3,
- .tRCD = 3,
- .tWR = 3,
- .tRASmin = 3,
- .tRRD = 2,
- .tWTR = 2,
- .tXP = 2,
- .tRTP = 2,
- .tCKE = 3,
- .tCKESR = 3,
- .tFAW = 8
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index bacf7b83ccf0..cd2f5974ed98 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -5,7 +5,7 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..12f5a347aa13 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
{
size /= esize;
- size = roundup_pow_of_two(size);
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
@@ -414,7 +415,7 @@ static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
)
/*
- * __kfifo_poke_n internal helper function for storeing the length of
+ * __kfifo_poke_n internal helper function for storing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
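A small worked example (hypothetical numbers) of the __kfifo_init() change above: the size is now rounded down, so the ring can never grow beyond the caller's buffer.

	/*
	 * Hypothetical illustration of the rounding change in __kfifo_init():
	 *
	 *   buffer holds 100 elements, esize == 1
	 *     old: size = roundup_pow_of_two(100)   == 128  (overruns the buffer)
	 *     new: size = rounddown_pow_of_two(100) ==  64  (always fits)
	 *
	 *   a size that is already a power of two (e.g. 128) is left unchanged.
	 */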
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..72fa20f405f1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -6,10 +6,12 @@
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
- * Please see the file Documentation/kobject.txt for critical information
+ * Please see the file Documentation/core-api/kobject.rst for critical information
* about using the kobject interface.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
@@ -25,7 +27,7 @@
* and thus @kobj should have a namespace tag associated with it. Returns
* %NULL otherwise.
*/
-const void *kobject_namespace(struct kobject *kobj)
+const void *kobject_namespace(const struct kobject *kobj)
{
const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
@@ -45,7 +47,7 @@ const void *kobject_namespace(struct kobject *kobj)
* representation of given kobject. Normally used to adjust ownership of
* objects in a container.
*/
-void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
*uid = GLOBAL_ROOT_UID;
*gid = GLOBAL_ROOT_GID;
@@ -54,30 +56,12 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
kobj->ktype->get_ownership(kobj, uid, gid);
}
-/*
- * populate_dir - populate directory with attributes.
- * @kobj: object we're working on.
- *
- * Most subsystems have a set of default attributes that are associated
- * with an object that registers with them. This is a helper called during
- * object registration that loops through the default attributes of the
- * subsystem and creates attributes files for them in sysfs.
- */
-static int populate_dir(struct kobject *kobj)
+static bool kobj_ns_type_is_valid(enum kobj_ns_type type)
{
- struct kobj_type *t = get_ktype(kobj);
- struct attribute *attr;
- int error = 0;
- int i;
+ if ((type <= KOBJ_NS_TYPE_NONE) || (type >= KOBJ_NS_TYPES))
+ return false;
- if (t && t->default_attrs) {
- for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
- error = sysfs_create_file(kobj, attr);
- if (error)
- break;
- }
- }
- return error;
+ return true;
}
static int create_dir(struct kobject *kobj)
@@ -90,12 +74,6 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- error = populate_dir(kobj);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
- }
-
if (ktype) {
error = sysfs_create_groups(kobj, ktype->default_groups);
if (error) {
@@ -116,8 +94,7 @@ static int create_dir(struct kobject *kobj)
*/
ops = kobj_child_ns_ops(kobj);
if (ops) {
- BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
- BUG_ON(ops->type >= KOBJ_NS_TYPES);
+ BUG_ON(!kobj_ns_type_is_valid(ops->type));
BUG_ON(!kobj_ns_type_registered(ops->type));
sysfs_enable_ns(kobj->sd);
@@ -126,10 +103,10 @@ static int create_dir(struct kobject *kobj)
return 0;
}
-static int get_kobj_path_length(struct kobject *kobj)
+static int get_kobj_path_length(const struct kobject *kobj)
{
int length = 1;
- struct kobject *parent = kobj;
+ const struct kobject *parent = kobj;
/* walk up the ancestors until we hit the one pointing to the
* root.
@@ -144,21 +121,25 @@ static int get_kobj_path_length(struct kobject *kobj)
return length;
}
-static void fill_kobj_path(struct kobject *kobj, char *path, int length)
+static int fill_kobj_path(const struct kobject *kobj, char *path, int length)
{
- struct kobject *parent;
+ const struct kobject *parent;
--length;
for (parent = kobj; parent; parent = parent->parent) {
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
+ if (length <= 0)
+ return -EINVAL;
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
- pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
+ pr_debug("'%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
+
+ return 0;
}
/**
@@ -168,18 +149,22 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
*
* Return: The newly allocated memory, caller must free with kfree().
*/
-char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
+char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask)
{
char *path;
int len;
+retry:
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
- fill_kobj_path(kobj, path, len);
+ if (fill_kobj_path(kobj, path, len)) {
+ kfree(path);
+ goto retry;
+ }
return path;
}
@@ -247,7 +232,7 @@ static int kobject_add_internal(struct kobject *kobj)
kobj->parent = parent;
}
- pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
+ pr_debug("'%s' (%p): %s: parent: '%s', set: '%s'\n",
kobject_name(kobj), kobj, __func__,
parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
@@ -303,8 +288,7 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
kfree_const(s);
if (!t)
return -ENOMEM;
- strreplace(t, '/', '!');
- s = t;
+ s = strreplace(t, '/', '!');
}
kfree_const(kobj->name);
kobj->name = s;
@@ -346,7 +330,7 @@ EXPORT_SYMBOL(kobject_set_name);
* to kobject_put(), not by a call to kfree directly to ensure that all of
* the memory is cleaned up properly.
*/
-void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
{
char *err_str;
@@ -362,7 +346,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
/* do not error out as sometimes we can recover */
pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
kobj);
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
}
kobject_init_internal(kobj);
@@ -371,7 +355,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
error:
pr_err("kobject (%p): %s\n", kobj, err_str);
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
}
EXPORT_SYMBOL(kobject_init);
@@ -383,7 +367,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
- pr_err("kobject: can not set name properly!\n");
+ pr_err("can not set name properly!\n");
return retval;
}
kobj->parent = parent;
@@ -435,7 +419,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
if (!kobj->state_initialized) {
pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
return -EINVAL;
}
va_start(args, fmt);
@@ -461,7 +445,7 @@ EXPORT_SYMBOL(kobject_add);
* same type of error handling after a call to kobject_add() and kobject
* lifetime rules are the same here.
*/
-int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...)
{
va_list args;
@@ -498,8 +482,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- if (!kobj->parent)
+ if (!kobj->parent) {
+ kobject_put(kobj);
return -EINVAL;
+ }
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
@@ -597,35 +583,50 @@ out:
}
EXPORT_SYMBOL_GPL(kobject_move);
-/**
- * kobject_del() - Unlink kobject from hierarchy.
- * @kobj: object.
- *
- * This is the function that should be called to delete an object
- * successfully added via kobject_add().
- */
-void kobject_del(struct kobject *kobj)
+static void __kobject_del(struct kobject *kobj)
{
struct kernfs_node *sd;
const struct kobj_type *ktype;
- if (!kobj)
- return;
-
sd = kobj->sd;
ktype = get_ktype(kobj);
if (ktype)
sysfs_remove_groups(kobj, ktype->default_groups);
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+ pr_debug("'%s' (%p): auto cleanup 'remove' event\n",
+ kobject_name(kobj), kobj);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ }
+
sysfs_remove_dir(kobj);
sysfs_put(sd);
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
- kobject_put(kobj->parent);
kobj->parent = NULL;
}
+
+/**
+ * kobject_del() - Unlink kobject from hierarchy.
+ * @kobj: object.
+ *
+ * This is the function that should be called to delete an object
+ * successfully added via kobject_add().
+ */
+void kobject_del(struct kobject *kobj)
+{
+ struct kobject *parent;
+
+ if (!kobj)
+ return;
+
+ parent = kobj->parent;
+ __kobject_del(kobj);
+ kobject_put(parent);
+}
EXPORT_SYMBOL(kobject_del);
/**
@@ -661,41 +662,40 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
*/
static void kobject_cleanup(struct kobject *kobj)
{
- struct kobj_type *t = get_ktype(kobj);
+ struct kobject *parent = kobj->parent;
+ const struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
- pr_debug("kobject: '%s' (%p): %s, parent %p\n",
+ pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
- kobject_name(kobj), kobj);
-
- /* send "remove" if the caller did not do it but sent "add" */
- if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
- pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
kobject_name(kobj), kobj);
- kobject_uevent(kobj, KOBJ_REMOVE);
- }
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
- pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
+ pr_debug("'%s' (%p): auto cleanup kobject_del\n",
kobject_name(kobj), kobj);
- kobject_del(kobj);
+ __kobject_del(kobj);
+ } else {
+ /* avoid dropping the parent reference unnecessarily */
+ parent = NULL;
}
if (t && t->release) {
- pr_debug("kobject: '%s' (%p): calling ktype release\n",
+ pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
}
/* free name if we allocated it */
if (name) {
- pr_debug("kobject: '%s': free name\n", name);
+ pr_debug("'%s': free name\n", name);
kfree_const(name);
}
+
+ kobject_put(parent);
}
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
@@ -710,9 +710,9 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
- pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
- kobject_name(kobj), kobj, __func__, kobj->parent, delay);
+ unsigned long delay = HZ + HZ * get_random_u32_below(4);
+ pr_info("'%s' (%p): %s, parent %p (delayed %ld)\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
schedule_delayed_work(&kobj->release, delay);
@@ -741,11 +741,11 @@ EXPORT_SYMBOL(kobject_put);
static void dynamic_kobj_release(struct kobject *kobj)
{
- pr_debug("kobject: (%p): %s\n", kobj, __func__);
+ pr_debug("(%p): %s\n", kobj, __func__);
kfree(kobj);
}
-static struct kobj_type dynamic_kobj_ktype = {
+static const struct kobj_type dynamic_kobj_ktype = {
.release = dynamic_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -761,7 +761,7 @@ static struct kobj_type dynamic_kobj_ktype = {
* call to kobject_put() and not kfree(), as kobject_init() has
* already been called on this structure.
*/
-struct kobject *kobject_create(void)
+static struct kobject *kobject_create(void)
{
struct kobject *kobj;
@@ -850,6 +850,9 @@ EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
/**
* kset_register() - Initialize and add a kset.
* @k: kset.
+ *
+ * NOTE: On error, the kset.kobj.name allocated by kobject_set_name() is
+ * freed and can no longer be used.
*/
int kset_register(struct kset *k)
{
@@ -858,10 +861,19 @@ int kset_register(struct kset *k)
if (!k)
return -EINVAL;
+ if (!k->kobj.ktype) {
+ pr_err("must have a ktype to be initialized properly!\n");
+ return -EINVAL;
+ }
+
kset_init(k);
err = kobject_add_internal(&k->kobj);
- if (err)
+ if (err) {
+ kfree_const(k->kobj.name);
+ /* Set it to NULL to avoid accessing bad pointer in callers. */
+ k->kobj.name = NULL;
return err;
+ }
kobject_uevent(&k->kobj, KOBJ_ADD);
return 0;
}
@@ -911,18 +923,18 @@ EXPORT_SYMBOL_GPL(kset_find_obj);
static void kset_release(struct kobject *kobj)
{
struct kset *kset = container_of(kobj, struct kset, kobj);
- pr_debug("kobject: '%s' (%p): %s\n",
+ pr_debug("'%s' (%p): %s\n",
kobject_name(kobj), kobj, __func__);
kfree(kset);
}
-static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+static void kset_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
if (kobj->parent)
kobject_get_ownership(kobj->parent, uid, gid);
}
-static struct kobj_type kset_ktype = {
+static const struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = kset_release,
.get_ownership = kset_get_ownership,
@@ -1017,11 +1029,7 @@ int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
spin_lock(&kobj_ns_type_lock);
error = -EINVAL;
- if (type >= KOBJ_NS_TYPES)
- goto out;
-
- error = -EINVAL;
- if (type <= KOBJ_NS_TYPE_NONE)
+ if (!kobj_ns_type_is_valid(type))
goto out;
error = -EBUSY;
@@ -1041,14 +1049,14 @@ int kobj_ns_type_registered(enum kobj_ns_type type)
int registered = 0;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
+ if (kobj_ns_type_is_valid(type))
registered = kobj_ns_ops_tbl[type] != NULL;
spin_unlock(&kobj_ns_type_lock);
return registered;
}
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent)
{
const struct kobj_ns_type_operations *ops = NULL;
@@ -1058,7 +1066,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
return ops;
}
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj)
{
return kobj_child_ns_ops(kobj->parent);
}
@@ -1068,8 +1076,7 @@ bool kobj_ns_current_may_mount(enum kobj_ns_type type)
bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
spin_unlock(&kobj_ns_type_lock);
@@ -1081,8 +1088,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->grab_current_ns();
spin_unlock(&kobj_ns_type_lock);
@@ -1095,8 +1101,7 @@ const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
spin_unlock(&kobj_ns_type_lock);
@@ -1108,8 +1113,7 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->initial_ns();
spin_unlock(&kobj_ns_type_lock);
@@ -1119,7 +1123,7 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ if (kobj_ns_type_is_valid(type) &&
kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
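A hypothetical caller sketch for the kset_register() change documented above: on failure the kobject name has already been freed and set to NULL, so the caller must not free or reuse it.

	static int demo_add_kset(struct kset *k)
	{
		int err;

		err = kset_register(k);		/* initializes and adds the kset */
		if (err) {
			/*
			 * kset_register() already freed k->kobj.name and set it
			 * to NULL; only the containing object itself is released.
			 */
			kfree(k);
			return err;
		}
		return 0;
	}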
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7998affa45d4..fb9a2f06dd1e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
+ int buffer_size = sizeof(env->buf) - env->buflen;
int len;
- len = strlcpy(&env->buf[env->buflen], subsystem,
- sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen)) {
- WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ len = strscpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len < 0) {
+ pr_warn("%s: insufficient buffer space (%u left) for %s\n",
+ __func__, buffer_size, subsystem);
return -ENOMEM;
}
@@ -500,7 +501,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
}
/* skip the event, if the filter returns zero. */
if (uevent_ops && uevent_ops->filter)
- if (!uevent_ops->filter(kset, kobj)) {
+ if (!uevent_ops->filter(kobj)) {
pr_debug("kobject: '%s' (%p): %s: filter function "
"caused the event to drop!\n",
kobject_name(kobj), kobj, __func__);
@@ -509,7 +510,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
/* originating subsystem */
if (uevent_ops && uevent_ops->name)
- subsystem = uevent_ops->name(kset, kobj);
+ subsystem = uevent_ops->name(kobj);
else
subsystem = kobject_name(&kset->kobj);
if (!subsystem) {
@@ -553,7 +554,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
/* let the kset specific function add its stuff */
if (uevent_ops && uevent_ops->uevent) {
- retval = uevent_ops->uevent(kset, kobj, env);
+ retval = uevent_ops->uevent(kobj, env);
if (retval) {
pr_debug("kobject: '%s' (%p): %s: uevent() returned "
"%d\n", kobject_name(kobj), kobj,
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 1006bf70bf74..d586e6af5e5a 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -14,13 +14,15 @@
*/
#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/math64.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+
#include "kstrtox.h"
+noinline
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
if (*base == 0) {
@@ -39,22 +41,25 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
/*
* Convert non-negative integer string representation in explicitly given radix
- * to an integer.
+ * to an integer. A maximum of max_chars characters will be converted.
+ *
* Return number of characters consumed maybe or-ed with overflow bit.
* If overflow occurs, result integer (incorrect) is still returned.
*
* Don't you dare use this function.
*/
-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+noinline
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
+ size_t max_chars)
{
unsigned long long res;
unsigned int rv;
res = 0;
rv = 0;
- while (1) {
+ while (max_chars--) {
unsigned int c = *s;
- unsigned int lc = c | 0x20; /* don't tolower() this line */
+ unsigned int lc = _tolower(c);
unsigned int val;
if ('0' <= c && c <= '9')
@@ -82,6 +87,12 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
return rv;
}
+noinline
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+{
+ return _parse_integer_limit(s, base, p, INT_MAX);
+}
+
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
unsigned long long _res;
@@ -115,9 +126,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoull(). Return code must be checked.
*/
+noinline
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
if (s[0] == '+')
@@ -139,9 +150,9 @@ EXPORT_SYMBOL(kstrtoull);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoll(). Return code must be checked.
*/
+noinline
int kstrtoll(const char *s, unsigned int base, long long *res)
{
unsigned long long tmp;
@@ -211,9 +222,9 @@ EXPORT_SYMBOL(_kstrtol);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
*/
+noinline
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
unsigned long long tmp;
@@ -242,9 +253,9 @@ EXPORT_SYMBOL(kstrtouint);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
*/
+noinline
int kstrtoint(const char *s, unsigned int base, int *res)
{
long long tmp;
@@ -260,6 +271,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
}
EXPORT_SYMBOL(kstrtoint);
+noinline
int kstrtou16(const char *s, unsigned int base, u16 *res)
{
unsigned long long tmp;
@@ -275,6 +287,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
}
EXPORT_SYMBOL(kstrtou16);
+noinline
int kstrtos16(const char *s, unsigned int base, s16 *res)
{
long long tmp;
@@ -290,6 +303,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
}
EXPORT_SYMBOL(kstrtos16);
+noinline
int kstrtou8(const char *s, unsigned int base, u8 *res)
{
unsigned long long tmp;
@@ -305,6 +319,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
}
EXPORT_SYMBOL(kstrtou8);
+noinline
int kstrtos8(const char *s, unsigned int base, s8 *res)
{
long long tmp;
@@ -325,10 +340,11 @@ EXPORT_SYMBOL(kstrtos8);
* @s: input string
* @res: result
*
- * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
+ * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
* [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
+noinline
int kstrtobool(const char *s, bool *res)
{
if (!s)
@@ -337,11 +353,15 @@ int kstrtobool(const char *s, bool *res)
switch (s[0]) {
case 'y':
case 'Y':
+ case 't':
+ case 'T':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
+ case 'f':
+ case 'F':
case '0':
*res = false;
return 0;
@@ -359,6 +379,7 @@ int kstrtobool(const char *s, bool *res)
default:
break;
}
+ break;
default:
break;
}
diff --git a/lib/kstrtox.h b/lib/kstrtox.h
index 3b4637bcd254..158c400ca865 100644
--- a/lib/kstrtox.h
+++ b/lib/kstrtox.h
@@ -4,6 +4,8 @@
#define KSTRTOX_OVERFLOW (1U << 31)
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
+ size_t max_chars);
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
#endif
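A brief, hypothetical caller sketch showing the strings kstrtobool() accepts after the change above (return values must be checked in real code):

	bool val;

	/* 't'/'T' and 'f'/'F' are now accepted in addition to 'Yy1Nn0' and on/off. */
	kstrtobool("T", &val);		/* returns 0, val == true  */
	kstrtobool("false", &val);	/* returns 0, val == false */
	kstrtobool("on", &val);		/* returns 0, val == true  */
	kstrtobool("maybe", &val);	/* returns -EINVAL, val untouched */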
diff --git a/lib/kunit/.kunitconfig b/lib/kunit/.kunitconfig
new file mode 100644
index 000000000000..9235b7d42d38
--- /dev/null
+++ b/lib/kunit/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_TEST=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
new file mode 100644
index 000000000000..68a6daec0aef
--- /dev/null
+++ b/lib/kunit/Kconfig
@@ -0,0 +1,73 @@
+#
+# KUnit base configuration
+#
+
+menuconfig KUNIT
+ tristate "KUnit - Enable support for unit tests"
+ select GLOB
+ help
+ Enables support for kernel unit tests (KUnit), a lightweight unit
+ testing and mocking framework for the Linux kernel. These tests are
+ able to be run locally on a developer's workstation without a VM or
+ special hardware when using UML. Can also be used on most other
+ architectures. For more information, please see
+ Documentation/dev-tools/kunit/.
+
+if KUNIT
+
+config KUNIT_DEBUGFS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enable debugfs representation for kunit. Currently this consists
+ of /sys/kernel/debug/kunit/<test_suite>/results files for each
+ test suite, which allow users to see results of the last test suite
+ run that occurred.
+
+config KUNIT_TEST
+ tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enables the unit tests for the KUnit test framework. These tests test
+ the KUnit test framework itself; the tests are both written using
+ KUnit and test KUnit. This option should only be enabled for testing
+ purposes by developers interested in testing that KUnit works as
+ expected.
+
+config KUNIT_EXAMPLE_TEST
+ tristate "Example test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enables an example unit test that illustrates some of the basic
+ features of KUnit. This test only exists to help new users understand
+ what KUnit is and how it is used. Please refer to the example test
+ itself, lib/kunit/example-test.c, for more information. This option
+ is intended for curious hackers who would like to understand how to
+ use KUnit for kernel development.
+
+config KUNIT_ALL_TESTS
+ tristate "All KUnit tests with satisfied dependencies"
+ help
+ Enables all KUnit tests, if they can be enabled.
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config KUNIT_DEFAULT_ENABLED
+ bool "Default value of kunit.enable"
+ default y
+ help
+ Sets the default value of kunit.enable. If set to N then KUnit
+ tests will not execute unless kunit.enable=1 is passed to the
+ kernel command line.
+
+ In most cases this should be left as Y. Only if additional opt-in
+ behavior is needed should this be set to N.
+
+endif # KUNIT
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
new file mode 100644
index 000000000000..309659a32a78
--- /dev/null
+++ b/lib/kunit/Makefile
@@ -0,0 +1,27 @@
+obj-$(CONFIG_KUNIT) += kunit.o
+
+kunit-objs += test.o \
+ resource.o \
+ static_stub.o \
+ string-stream.o \
+ assert.o \
+ try-catch.o \
+ executor.o \
+ attributes.o \
+ device.o
+
+ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
+kunit-objs += debugfs.o
+endif
+
+# KUnit 'hooks' are built-in even when KUnit is built as a module.
+obj-y += hooks.o
+
+obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
+
+# string-stream-test compiles built-in only.
+ifeq ($(CONFIG_KUNIT_TEST),y)
+obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
+endif
+
+obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
new file mode 100644
index 000000000000..dd1d633d0fe2
--- /dev/null
+++ b/lib/kunit/assert.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Assertion and expectation serialization API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/assert.h>
+#include <kunit/test.h>
+
+#include "string-stream.h"
+
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct string_stream *stream)
+{
+ const char *expect_or_assert = NULL;
+
+ switch (type) {
+ case KUNIT_EXPECTATION:
+ expect_or_assert = "EXPECTATION";
+ break;
+ case KUNIT_ASSERTION:
+ expect_or_assert = "ASSERTION";
+ break;
+ }
+
+ string_stream_add(stream, "%s FAILED at %s:%d\n",
+ expect_or_assert, loc->file, loc->line);
+}
+EXPORT_SYMBOL_GPL(kunit_assert_prologue);
+
+static void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream)
+{
+ if (message->fmt)
+ string_stream_add(stream, "\n%pV", message);
+}
+
+void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ string_stream_add(stream, "%pV", message);
+}
+EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
+
+void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_unary_assert *unary_assert;
+
+ unary_assert = container_of(assert, struct kunit_unary_assert, assert);
+
+ if (unary_assert->expected_true)
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
+ unary_assert->condition);
+ else
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
+ unary_assert->condition);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
+
+void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_ptr_not_err_assert *ptr_assert;
+
+ ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert,
+ assert);
+
+ if (!ptr_assert->value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ ptr_assert->text);
+ } else if (IS_ERR(ptr_assert->value)) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
+ }
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
+
+/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
+static bool is_literal(const char *text, long long value)
+{
+ char *buffer;
+ int len;
+ bool ret;
+
+ len = snprintf(NULL, 0, "%lld", value);
+ if (strlen(text) != len)
+ return false;
+
+ buffer = kmalloc(len+1, GFP_KERNEL);
+ if (!buffer)
+ return false;
+
+ snprintf(buffer, len+1, "%lld", value);
+ ret = strncmp(buffer, text, len) == 0;
+
+ kfree(buffer);
+
+ return ret;
+}
+
+void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_literal(binary_assert->text->left_text, binary_assert->left_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value,
+ binary_assert->left_value);
+ if (!is_literal(binary_assert->text->right_text, binary_assert->right_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
+ binary_assert->text->right_text,
+ binary_assert->right_value,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
+
+void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_ptr_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_ptr_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
+ binary_assert->text->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
+
+/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
+ * Note: `text` will have ""s where as `value` will not.
+ */
+static bool is_str_literal(const char *text, const char *value)
+{
+ int len;
+
+ len = strlen(text);
+ if (len < 2)
+ return false;
+ if (text[0] != '\"' || text[len - 1] != '\"')
+ return false;
+
+ return strncmp(text + 1, value, len - 2) == 0;
+}
+
+void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_str_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_str_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_str_literal(binary_assert->text->left_text, binary_assert->left_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value);
+ if (!is_str_literal(binary_assert->text->right_text, binary_assert->right_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"",
+ binary_assert->text->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
+
+/* Adds a hexdump of a buffer to a string_stream comparing it with
+ * a second buffer. The different bytes are marked with <>.
+ */
+static void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
+{
+ size_t i;
+ const u8 *buf1 = buf;
+ const u8 *buf2 = compared_buf;
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT);
+
+ for (i = 0; i < len; ++i) {
+ if (!(i % 16) && i)
+ string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT);
+
+ if (buf1[i] != buf2[i])
+ string_stream_add(stream, "<%02x>", buf1[i]);
+ else
+ string_stream_add(stream, " %02x ", buf1[i]);
+ }
+}
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_mem_assert *mem_assert;
+
+ mem_assert = container_of(assert, struct kunit_mem_assert,
+ assert);
+
+ if (!mem_assert->left_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->left_text);
+ } else if (!mem_assert->right_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->right_text);
+ } else {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
+
+ string_stream_add(stream, "\n");
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
+
+ kunit_assert_print_msg(message, stream);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
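As a hypothetical illustration of the output built by the binary assert formatter above, a failing KUNIT_EXPECT_EQ(test, foo(), 2) where foo() returned 5 would be rendered roughly as follows (file and line are made up; the right-hand side line is suppressed because "2" is a literal per is_literal()):

	EXPECTATION FAILED at lib/example_test.c:10
	    Expected foo() == 2, but
	        foo() == 5 (0x5)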
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
new file mode 100644
index 000000000000..2cf04cc09372
--- /dev/null
+++ b/lib/kunit/attributes.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+/* Options for printing attributes:
+ * PRINT_ALWAYS - attribute is printed for every test case and suite if set
+ * PRINT_SUITE - attribute is printed for every suite if set but not for test cases
+ * PRINT_NEVER - attribute is never printed
+ */
+enum print_ops {
+ PRINT_ALWAYS,
+ PRINT_SUITE,
+ PRINT_NEVER,
+};
+
+/**
+ * struct kunit_attr - represents a test attribute and holds flexible
+ * helper functions to interact with the attribute.
+ *
+ * @name: name of the test attribute, e.g. speed
+ * @get_attr: function to return attribute value given a test
+ * @to_string: function to return string representation of given
+ * attribute value
+ * @filter: function to indicate whether a given attribute value passes a
+ * filter
+ * @attr_default: default attribute value used during filtering
+ * @print: value of enum print_ops to indicate when to print attribute
+ */
+struct kunit_attr {
+ const char *name;
+ void *(*get_attr)(void *test_or_suite, bool is_test);
+ const char *(*to_string)(void *attr, bool *to_free);
+ int (*filter)(void *attr, const char *input, int *err);
+ void *attr_default;
+ enum print_ops print;
+};
+
+/* String Lists for enum Attributes */
+
+static const char * const speed_str_list[] = {"unset", "very_slow", "slow", "normal"};
+
+/* To String Methods */
+
+static const char *attr_enum_to_string(void *attr, const char * const str_list[], bool *to_free)
+{
+ long val = (long)attr;
+
+ *to_free = false;
+ if (!val)
+ return NULL;
+ return str_list[val];
+}
+
+static const char *attr_bool_to_string(void *attr, bool *to_free)
+{
+ bool val = (bool)attr;
+
+ *to_free = false;
+ if (val)
+ return "true";
+ return "false";
+}
+
+static const char *attr_speed_to_string(void *attr, bool *to_free)
+{
+ return attr_enum_to_string(attr, speed_str_list, to_free);
+}
+
+static const char *attr_string_to_string(void *attr, bool *to_free)
+{
+ *to_free = false;
+ return (char *) attr;
+}
+
+/* Filter Methods */
+
+static const char op_list[] = "<>!=";
+
+/*
+ * Returns whether the inputted integer value matches the filter given
+ * by the operation string and inputted integer.
+ */
+static int int_filter(long val, const char *op, int input, int *err)
+{
+ if (!strncmp(op, "<=", 2))
+ return (val <= input);
+ else if (!strncmp(op, ">=", 2))
+ return (val >= input);
+ else if (!strncmp(op, "!=", 2))
+ return (val != input);
+ else if (!strncmp(op, ">", 1))
+ return (val > input);
+ else if (!strncmp(op, "<", 1))
+ return (val < input);
+ else if (!strncmp(op, "=", 1))
+ return (val == input);
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", op);
+ return false;
+}
+
+/*
+ * Returns whether the inputted enum value "attr" matches the filter given
+ * by the input string. Note: the str_list includes the corresponding string
+ * list to the enum values.
+ */
+static int attr_enum_filter(void *attr, const char *input, int *err,
+ const char * const str_list[], int max)
+{
+ int i, j, input_int = -1;
+ long test_val = (long)attr;
+ const char *input_val = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_val = input + i;
+ break;
+ }
+ }
+
+ if (!input_val) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ for (j = 0; j <= max; j++) {
+ if (!strcmp(input_val, str_list[j]))
+ input_int = j;
+ }
+
+ if (input_int < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(test_val, input, input_int, err);
+}
+
+static int attr_speed_filter(void *attr, const char *input, int *err)
+{
+ return attr_enum_filter(attr, input, err, speed_str_list, KUNIT_SPEED_MAX);
+}
+
+/*
+ * Returns whether the inputted string value (attr) matches the filter given
+ * by the input string.
+ */
+static int attr_string_filter(void *attr, const char *input, int *err)
+{
+ char *str = attr;
+
+ if (!strncmp(input, "<", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, ">", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, "!=", 2)) {
+ return (strcmp(input + 2, str) != 0);
+ } else if (!strncmp(input, "=", 1)) {
+ return (strcmp(input + 1, str) == 0);
+ }
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", input);
+ return false;
+}
+
+static int attr_bool_filter(void *attr, const char *input, int *err)
+{
+ int i, input_int = -1;
+ long val = (long)attr;
+ const char *input_str = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_str = input + i;
+ break;
+ }
+ }
+
+ if (!input_str) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ if (!strcmp(input_str, "true"))
+ input_int = (int)true;
+ else if (!strcmp(input_str, "false"))
+ input_int = (int)false;
+ else {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(val, input, input_int, err);
+}
+
+/* Get Attribute Methods */
+
+static void *attr_speed_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) test->attr.speed);
+ else
+ return ((void *) suite->attr.speed);
+}
+
+static void *attr_module_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ // Suites get their module attribute from their first test_case
+ if (test)
+ return ((void *) test->module_name);
+ else if (kunit_suite_num_test_cases(suite) > 0)
+ return ((void *) suite->test_cases[0].module_name);
+ else
+ return (void *) "";
+}
+
+static void *attr_is_init_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) NULL);
+ else
+ return ((void *) suite->is_init);
+}
+
+/* List of all Test Attributes */
+
+static struct kunit_attr kunit_attr_list[] = {
+ {
+ .name = "speed",
+ .get_attr = attr_speed_get,
+ .to_string = attr_speed_to_string,
+ .filter = attr_speed_filter,
+ .attr_default = (void *)KUNIT_SPEED_NORMAL,
+ .print = PRINT_ALWAYS,
+ },
+ {
+ .name = "module",
+ .get_attr = attr_module_get,
+ .to_string = attr_string_to_string,
+ .filter = attr_string_filter,
+ .attr_default = (void *)"",
+ .print = PRINT_SUITE,
+ },
+ {
+ .name = "is_init",
+ .get_attr = attr_is_init_get,
+ .to_string = attr_bool_to_string,
+ .filter = attr_bool_filter,
+ .attr_default = (void *)false,
+ .print = PRINT_SUITE,
+ }
+};
+
+/* Helper Functions to Access Attributes */
+
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter)
+{
+ return filter.attr->name;
+}
+
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level)
+{
+ int i;
+ bool to_free = false;
+ void *attr;
+ const char *attr_name, *attr_str;
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ for (i = 0; i < ARRAY_SIZE(kunit_attr_list); i++) {
+ if (kunit_attr_list[i].print == PRINT_NEVER ||
+ (test && kunit_attr_list[i].print == PRINT_SUITE))
+ continue;
+ attr = kunit_attr_list[i].get_attr(test_or_suite, is_test);
+ if (attr) {
+ attr_name = kunit_attr_list[i].name;
+ attr_str = kunit_attr_list[i].to_string(attr, &to_free);
+ if (test) {
+ kunit_log(KERN_INFO, test, "%*s# %s.%s: %s",
+ KUNIT_INDENT_LEN * test_level, "", test->name,
+ attr_name, attr_str);
+ } else {
+ kunit_log(KERN_INFO, suite, "%*s# %s: %s",
+ KUNIT_INDENT_LEN * test_level, "", attr_name, attr_str);
+ }
+
+ /* Free to_string of attribute if needed */
+ if (to_free)
+ kfree(attr_str);
+ }
+ }
+}
+
+/* Helper Functions to Filter Attributes */
+
+int kunit_get_filter_count(char *input)
+{
+ int i, comma_index = 0, count = 0;
+
+ for (i = 0; input[i]; i++) {
+ if (input[i] == ',') {
+ if ((i - comma_index) > 1)
+ count++;
+ comma_index = i;
+ }
+ }
+ if ((i - comma_index) > 0)
+ count++;
+ return count;
+}
+
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err)
+{
+ struct kunit_attr_filter filter = {};
+ int i, j, comma_index = 0, new_start_index = 0;
+ int op_index = -1, attr_index = -1;
+ char op;
+ char *input = *filters;
+
+ /* Parse input until operation */
+ for (i = 0; input[i]; i++) {
+ if (op_index < 0 && strchr(op_list, input[i])) {
+ op_index = i;
+ } else if (!comma_index && input[i] == ',') {
+ comma_index = i;
+ } else if (comma_index && input[i] != ' ') {
+ new_start_index = i;
+ break;
+ }
+ }
+
+ if (op_index <= 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter operation not found: %s\n", input);
+ return filter;
+ }
+
+ /* Temporarily set operator to \0 character. */
+ op = input[op_index];
+ input[op_index] = '\0';
+
+ /* Find associated kunit_attr object */
+ for (j = 0; j < ARRAY_SIZE(kunit_attr_list); j++) {
+ if (!strcmp(input, kunit_attr_list[j].name)) {
+ attr_index = j;
+ break;
+ }
+ }
+
+ input[op_index] = op;
+
+ if (attr_index < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: attribute not found: %s\n", input);
+ } else {
+ filter.attr = &kunit_attr_list[attr_index];
+ }
+
+ if (comma_index > 0) {
+ input[comma_index] = '\0';
+ filter.input = input + op_index;
+ input = input + new_start_index;
+ } else {
+ filter.input = input + op_index;
+ input = NULL;
+ }
+
+ *filters = input;
+
+ return filter;
+}
+
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+ void *suite_val, *test_val;
+ bool suite_result, test_result, default_result, result;
+
+ /* Allocate memory for new copy of suite and list of test cases */
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ kunit_suite_for_each_test_case(suite, test_case) { n++; }
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+
+ /* Save filtering result on default value */
+ default_result = filter.attr->filter(filter.attr->attr_default, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* Save suite attribute value and filtering result on that value */
+ suite_val = filter.attr->get_attr((void *)suite, false);
+ suite_result = filter.attr->filter(suite_val, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* For each test case, save test case if passes filtering. */
+ kunit_suite_for_each_test_case(suite, test_case) {
+ test_val = filter.attr->get_attr((void *) test_case, true);
+ test_result = filter.attr->filter(filter.attr->get_attr(test_case, true),
+ filter.input, err);
+ if (*err)
+ goto err;
+
+ /*
+ * If attribute value of test case is set, filter on that value.
+ * If not, filter on suite value if set. If not, filter on
+ * default value.
+ */
+ result = false;
+ if (test_val) {
+ if (test_result)
+ result = true;
+ } else if (suite_val) {
+ if (suite_result)
+ result = true;
+ } else if (default_result) {
+ result = true;
+ }
+
+ if (result) {
+ filtered[n++] = *test_case;
+ } else if (action && strcmp(action, "skip") == 0) {
+ test_case->status = KUNIT_SKIPPED;
+ filtered[n++] = *test_case;
+ }
+ }
+
+err:
+ if (n == 0 || *err) {
+ kfree(copy);
+ kfree(filtered);
+ return NULL;
+ }
+
+ copy->test_cases = filtered;
+
+ return copy;
+}
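A hypothetical walk-through of the filter-parsing helpers above, for the input string "speed>slow, module!=example":

	/*
	 * kunit_get_filter_count("speed>slow, module!=example") -> 2
	 *
	 * 1st kunit_next_attr_filter():
	 *     filter.attr  -> the "speed" entry of kunit_attr_list
	 *     filter.input -> ">slow"
	 *     *filters     -> advanced to "module!=example"
	 *
	 * 2nd kunit_next_attr_filter():
	 *     filter.attr  -> the "module" entry of kunit_attr_list
	 *     filter.input -> "!=example"
	 *     *filters     -> NULL (no more filters)
	 */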
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
new file mode 100644
index 000000000000..d548750a325a
--- /dev/null
+++ b/lib/kunit/debugfs.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ * Author: Alan Maguire <alan.maguire@oracle.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "string-stream.h"
+#include "debugfs.h"
+
+#define KUNIT_DEBUGFS_ROOT "kunit"
+#define KUNIT_DEBUGFS_RESULTS "results"
+#define KUNIT_DEBUGFS_RUN "run"
+
+/*
+ * Create a debugfs representation of test suites:
+ *
+ * Path Semantics
+ * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
+ * testsuite
+ * /sys/kernel/debug/kunit/<testsuite>/run Write to this file to trigger
+ * testsuite to run
+ *
+ */
+
+static struct dentry *debugfs_rootdir;
+
+void kunit_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs_rootdir);
+}
+
+void kunit_debugfs_init(void)
+{
+ if (!debugfs_rootdir)
+ debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
+}
+
+static void debugfs_print_result(struct seq_file *seq, struct string_stream *log)
+{
+ struct string_stream_fragment *frag_container;
+
+ if (!log)
+ return;
+
+ /*
+ * Walk the fragments so we don't need to allocate a temporary
+ * buffer to hold the entire string.
+ */
+ spin_lock(&log->lock);
+ list_for_each_entry(frag_container, &log->fragments, node)
+ seq_printf(seq, "%s", frag_container->fragment);
+ spin_unlock(&log->lock);
+}
+
+/*
+ * /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
+ */
+static int debugfs_print_results(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+ enum kunit_status success;
+ struct kunit_case *test_case;
+
+ if (!suite)
+ return 0;
+
+ success = kunit_suite_has_succeeded(suite);
+
+ /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ seq_puts(seq, "KTAP version 1\n");
+ seq_puts(seq, "1..1\n");
+
+ /* Print suite header because it is not stored in the test logs. */
+ seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ debugfs_print_result(seq, test_case->log);
+
+ debugfs_print_result(seq, suite->log);
+
+ seq_printf(seq, "%s %d %s\n",
+ kunit_status_to_ok_not_ok(success), 1, suite->name);
+ return 0;
+}
+
+static int debugfs_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static int debugfs_results_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_results, suite);
+}
+
+/*
+ * Print a usage message to the debugfs "run" file
+ * (/sys/kernel/debug/kunit/<testsuite>/run) if opened.
+ */
+static int debugfs_print_run(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+
+ seq_puts(seq, "Write to this file to trigger the test suite to run.\n");
+ seq_printf(seq, "usage: echo \"any string\" > /sys/kernel/debugfs/kunit/%s/run\n",
+ suite->name);
+ return 0;
+}
+
+/*
+ * The debugfs "run" file (/sys/kernel/debug/kunit/<testsuite>/run)
+ * contains no information. Write to the file to trigger the test suite
+ * to run.
+ */
+static int debugfs_run_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_run, suite);
+}
+
+/*
+ * Trigger a test suite to run by writing to the suite's "run" debugfs
+ * file found at: /sys/kernel/debug/kunit/<testsuite>/run
+ *
+ * Note: what is written to this file will not be saved.
+ */
+static ssize_t debugfs_run(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct inode *f_inode = file->f_inode;
+ struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private;
+
+ __kunit_test_suites_init(&suite, 1);
+
+ return count;
+}
+
+static const struct file_operations debugfs_results_fops = {
+ .open = debugfs_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+static const struct file_operations debugfs_run_fops = {
+ .open = debugfs_run_open,
+ .read = seq_read,
+ .write = debugfs_run,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+ struct string_stream *stream;
+
+ /* If suite log already allocated, do not create new debugfs files. */
+ if (suite->log)
+ return;
+
+ /*
+ * Allocate logs before creating debugfs representation.
+ * The suite->log and test_case->log pointer are expected to be NULL
+ * if there isn't a log, so only set it if the log stream was created
+ * successfully.
+ */
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ return;
+
+ string_stream_set_append_newlines(stream, true);
+ suite->log = stream;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ goto err;
+
+ string_stream_set_append_newlines(stream, true);
+ test_case->log = stream;
+ }
+
+ suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
+
+ debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
+ suite->debugfs,
+ suite, &debugfs_results_fops);
+
+ /* Do not create a "run" file for init suites; they cannot be re-run after boot. */
+ if (!suite->is_init) {
+ debugfs_create_file(KUNIT_DEBUGFS_RUN, S_IFREG | 0644,
+ suite->debugfs,
+ suite, &debugfs_run_fops);
+ }
+ return;
+
+err:
+ string_stream_destroy(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ string_stream_destroy(test_case->log);
+}
+
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ debugfs_remove_recursive(suite->debugfs);
+ string_stream_destroy(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ string_stream_destroy(test_case->log);
+}
diff --git a/lib/kunit/debugfs.h b/lib/kunit/debugfs.h
new file mode 100644
index 000000000000..dcc7d7556107
--- /dev/null
+++ b/lib/kunit/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _KUNIT_DEBUGFS_H
+#define _KUNIT_DEBUGFS_H
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite);
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite);
+void kunit_debugfs_init(void);
+void kunit_debugfs_cleanup(void);
+
+#else
+
+static inline void kunit_debugfs_create_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_destroy_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_init(void) { }
+
+static inline void kunit_debugfs_cleanup(void) { }
+
+#endif /* CONFIG_KUNIT_DEBUGFS */
+
+#endif /* _KUNIT_DEBUGFS_H */
diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h
new file mode 100644
index 000000000000..5fcd48ff0f36
--- /dev/null
+++ b/lib/kunit/device-impl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit internal header for device helpers
+ *
+ * Header for KUnit-internal driver / bus management.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_IMPL_H
+#define _KUNIT_DEVICE_IMPL_H
+
+// For internal use only -- registers the kunit_bus.
+int kunit_bus_init(void);
+// For internal use only -- unregisters the kunit_bus.
+void kunit_bus_shutdown(void);
+
+#endif //_KUNIT_DEVICE_IMPL_H
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
new file mode 100644
index 000000000000..644a38a1f5b1
--- /dev/null
+++ b/lib/kunit/device.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit-managed device implementation
+ *
+ * Implementation of struct kunit_device helpers for fake devices whose
+ * lifecycle is managed by KUnit.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <linux/device.h>
+
+#include <kunit/test.h>
+#include <kunit/device.h>
+#include <kunit/resource.h>
+
+#include "device-impl.h"
+
+/* Wrappers for use with kunit_add_action() */
+KUNIT_DEFINE_ACTION_WRAPPER(device_unregister_wrapper, device_unregister, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(driver_unregister_wrapper, driver_unregister, struct device_driver *);
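+/*
+ * Each generated wrapper takes a single void * argument, which is the calling
+ * convention expected by kunit_add_action() and kunit_release_action().
+ */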
+
+/* The root device for the KUnit bus, parent of all kunit_devices. */
+static struct device *kunit_bus_device;
+
+/* A device owned by a KUnit test. */
+struct kunit_device {
+ struct device dev;
+ /* The KUnit test which owns this device. */
+ struct kunit *owner;
+ /* Set if the driver is managed by KUnit and unique to this device. */
+ const struct device_driver *driver;
+};
+
+#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
+
+static struct bus_type kunit_bus_type = {
+ .name = "kunit",
+};
+
+/* Register the 'kunit_bus' used for fake devices. */
+int kunit_bus_init(void)
+{
+ int error;
+
+ kunit_bus_device = root_device_register("kunit");
+ if (IS_ERR(kunit_bus_device))
+ return PTR_ERR(kunit_bus_device);
+
+ error = bus_register(&kunit_bus_type);
+ if (error)
+ root_device_unregister(kunit_bus_device);
+ return error;
+}
+
+/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */
+void kunit_bus_shutdown(void)
+{
+ /* Make sure the bus exists before we unregister it. */
+ if (IS_ERR_OR_NULL(kunit_bus_device))
+ return;
+
+ bus_unregister(&kunit_bus_type);
+
+ root_device_unregister(kunit_bus_device);
+
+ kunit_bus_device = NULL;
+}
+
+/* Release a 'fake' KUnit device. */
+static void kunit_device_release(struct device *d)
+{
+ kfree(to_kunit_device(d));
+}
+
+/*
+ * Create and register a KUnit-managed struct device_driver on the kunit_bus.
+ * Returns an error pointer on failure.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name)
+{
+ struct device_driver *driver;
+ int err = -ENOMEM;
+
+ driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return ERR_PTR(err);
+
+ driver->name = name;
+ driver->bus = &kunit_bus_type;
+ driver->owner = THIS_MODULE;
+
+ err = driver_register(driver);
+ if (err) {
+ kunit_kfree(test, driver);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, driver_unregister_wrapper, driver);
+ return driver;
+}
+EXPORT_SYMBOL_GPL(kunit_driver_create);
+
+/* Helper which creates a kunit_device and attaches it to the kunit_bus. */
+static struct kunit_device *kunit_device_register_internal(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev;
+ int err = -ENOMEM;
+
+ kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL);
+ if (!kunit_dev)
+ return ERR_PTR(err);
+
+ kunit_dev->owner = test;
+
+ err = dev_set_name(&kunit_dev->dev, "%s.%s", test->name, name);
+ if (err) {
+ kfree(kunit_dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_dev->dev.release = kunit_device_release;
+ kunit_dev->dev.bus = &kunit_bus_type;
+ kunit_dev->dev.parent = kunit_bus_device;
+
+ err = device_register(&kunit_dev->dev);
+ if (err) {
+ put_device(&kunit_dev->dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev);
+
+ return kunit_dev;
+}
+
+/*
+ * Create and register a new KUnit-managed device, using the user-supplied device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev = kunit_device_register_internal(test, name, drv);
+
+ if (IS_ERR_OR_NULL(kunit_dev))
+ return ERR_CAST(kunit_dev);
+
+ return &kunit_dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register_with_driver);
+
+/*
+ * Create and register a new KUnit-managed device, including a matching device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name)
+{
+ struct device_driver *drv;
+ struct kunit_device *dev;
+
+ drv = kunit_driver_create(test, name);
+ if (IS_ERR(drv))
+ return ERR_CAST(drv);
+
+ dev = kunit_device_register_internal(test, name, drv);
+ if (IS_ERR(dev)) {
+ kunit_release_action(test, driver_unregister_wrapper, (void *)drv);
+ return ERR_CAST(dev);
+ }
+
+ /* Record the KUnit-managed driver so it is released along with the device. */
+ dev->driver = drv;
+
+ return &dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register);
+
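+/*
+ * A minimal usage sketch (the device name below is only illustrative): from
+ * inside a test case,
+ *
+ *   struct device *dev = kunit_device_register(test, "my-fake-device");
+ *
+ *   KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ *
+ * Both the device and its automatically created driver are cleaned up when
+ * the test finishes, so no explicit unregister call is required.
+ */
+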
+/* Unregisters a KUnit-managed device early (including the driver, if automatically created). */
+void kunit_device_unregister(struct kunit *test, struct device *dev)
+{
+ const struct device_driver *driver = to_kunit_device(dev)->driver;
+
+ kunit_release_action(test, device_unregister_wrapper, dev);
+ if (driver)
+ kunit_release_action(test, driver_unregister_wrapper, (void *)driver);
+}
+EXPORT_SYMBOL_GPL(kunit_device_unregister);
+
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
new file mode 100644
index 000000000000..689fff2b2b10
--- /dev/null
+++ b/lib/kunit/executor.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/reboot.h>
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+#include <linux/glob.h>
+#include <linux/moduleparam.h>
+
+/*
+ * These symbols point to the .kunit_test_suites section and are defined in
+ * include/asm-generic/vmlinux.lds.h, and consequently must be extern.
+ */
+extern struct kunit_suite * const __kunit_suites_start[];
+extern struct kunit_suite * const __kunit_suites_end[];
+extern struct kunit_suite * const __kunit_init_suites_start[];
+extern struct kunit_suite * const __kunit_init_suites_end[];
+
+static char *action_param;
+
+module_param_named(action, action_param, charp, 0400);
+MODULE_PARM_DESC(action,
+ "Changes KUnit executor behavior, valid values are:\n"
+ "<none>: run the tests like normal\n"
+ "'list' to list test names instead of running them.\n"
+ "'list_attr' to list test names and attributes instead of running them.\n");
+
+const char *kunit_action(void)
+{
+ return action_param;
+}
+
+static char *filter_glob_param;
+static char *filter_param;
+static char *filter_action_param;
+
+module_param_named(filter_glob, filter_glob_param, charp, 0400);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
+module_param_named(filter, filter_param, charp, 0400);
+MODULE_PARM_DESC(filter,
+ "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
+module_param_named(filter_action, filter_action_param, charp, 0400);
+MODULE_PARM_DESC(filter_action,
+ "Changes behavior of filtered tests using attributes, valid values are:\n"
+ "<none>: do not run filtered tests as normal\n"
+ "'skip': skip all filtered tests instead so tests will appear in output\n");
+
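+/*
+ * For example, these parameters might be combined on the kernel command line
+ * (the glob below is purely illustrative) as:
+ *   kunit.filter_glob=list* kunit.filter=speed>slow kunit.filter_action=skip
+ * so that only matching suites run, and tests filtered out by the attribute
+ * filter are reported as skipped rather than silently dropped.
+ */
+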
+const char *kunit_filter_glob(void)
+{
+ return filter_glob_param;
+}
+
+char *kunit_filter(void)
+{
+ return filter_param;
+}
+
+char *kunit_filter_action(void)
+{
+ return filter_action_param;
+}
+
+/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
+struct kunit_glob_filter {
+ char *suite_glob;
+ char *test_glob;
+};
+
+/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
+static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
+ const char *filter_glob)
+{
+ const int len = strlen(filter_glob);
+ const char *period = strchr(filter_glob, '.');
+
+ if (!period) {
+ parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
+ parsed->test_glob = NULL;
+ strcpy(parsed->suite_glob, filter_glob);
+ return 0;
+ }
+
+ parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
+ parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ if (!parsed->test_glob) {
+ kfree(parsed->suite_glob);
+ return -ENOMEM;
+ }
+
+ strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
+ strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+
+ return 0;
+}
+
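+/*
+ * For example, "list*.*del_test" is split into suite_glob = "list*" and
+ * test_glob = "*del_test", while a glob with no '.' such as "list*" leaves
+ * test_glob set to NULL.
+ */
+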
+/* Create a copy of suite with only tests that match test_glob. */
+static struct kunit_suite *
+kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_glob)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (!test_glob || glob_match(test_glob, test_case->name))
+ ++n;
+ }
+
+ if (n == 0)
+ return NULL;
+
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (!test_glob || glob_match(test_glob, test_case->name))
+ filtered[n++] = *test_case;
+ }
+
+ copy->test_cases = filtered;
+ return copy;
+}
+
+void kunit_free_suite_set(struct kunit_suite_set suite_set)
+{
+ struct kunit_suite * const *suites;
+
+ for (suites = suite_set.start; suites < suite_set.end; suites++) {
+ kfree((*suites)->test_cases);
+ kfree(*suites);
+ }
+ kfree(suite_set.start);
+}
+
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err)
+{
+ int i, j, k;
+ int filter_count = 0;
+ struct kunit_suite **copy, **copy_start, *filtered_suite, *new_filtered_suite;
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
+ struct kunit_suite * const *suites;
+
+ const size_t max = suite_set->end - suite_set->start;
+
+ copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ return filtered;
+ }
+ copy_start = copy;
+
+ if (filter_glob) {
+ *err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (*err)
+ goto free_copy;
+ }
+
+ /* Parse attribute filters */
+ if (filters) {
+ filter_count = kunit_get_filter_count(filters);
+ parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
+ if (!parsed_filters) {
+ *err = -ENOMEM;
+ goto free_parsed_glob;
+ }
+ for (j = 0; j < filter_count; j++)
+ parsed_filters[j] = kunit_next_attr_filter(&filters, err);
+ if (*err)
+ goto free_parsed_filters;
+ }
+
+ for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
+ filtered_suite = suite_set->start[i];
+ if (filter_glob) {
+ if (!glob_match(parsed_glob.suite_glob, filtered_suite->name))
+ continue;
+ filtered_suite = kunit_filter_glob_tests(filtered_suite,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto free_filtered_suite;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+ for (k = 0; k < filter_count; k++) {
+ new_filtered_suite = kunit_filter_attr_tests(filtered_suite,
+ parsed_filters[k], filter_action, err);
+
+ /* Free previous copy of suite */
+ if (k > 0 || filter_glob) {
+ kfree(filtered_suite->test_cases);
+ kfree(filtered_suite);
+ }
+
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+ goto free_filtered_suite;
+
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto free_filtered_suite;
+ }
+ if (!filtered_suite)
+ break;
+ }
+ }
+
+ if (!filtered_suite)
+ continue;
+
+ *copy++ = filtered_suite;
+ }
+ filtered.start = copy_start;
+ filtered.end = copy;
+
+free_filtered_suite:
+ if (*err) {
+ for (suites = copy_start; suites < copy; suites++) {
+ kfree((*suites)->test_cases);
+ kfree(*suites);
+ }
+ }
+
+free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+
+free_parsed_glob:
+ if (filter_glob) {
+ kfree(parsed_glob.suite_glob);
+ kfree(parsed_glob.test_glob);
+ }
+
+free_copy:
+ if (*err)
+ kfree(copy_start);
+
+ return filtered;
+}
+
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
+{
+ size_t num_suites = suite_set->end - suite_set->start;
+
+ if (builtin || num_suites) {
+ pr_info("KTAP version 1\n");
+ pr_info("1..%zu\n", num_suites);
+ }
+
+ __kunit_test_suites_init(suite_set->start, num_suites);
+}
+
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
+{
+ struct kunit_suite * const *suites;
+ struct kunit_case *test_case;
+
+ /* Hack: print a KTAP header so kunit.py can find the start of KUnit output. */
+ pr_info("KTAP version 1\n");
+
+ for (suites = suite_set->start; suites < suite_set->end; suites++) {
+ /* Print suite name and suite attributes */
+ pr_info("%s\n", (*suites)->name);
+ if (include_attr)
+ kunit_print_attr((void *)(*suites), false, 0);
+
+ /* Print test case name and attributes in suite */
+ kunit_suite_for_each_test_case((*suites), test_case) {
+ pr_info("%s.%s\n", (*suites)->name, test_case->name);
+ if (include_attr)
+ kunit_print_attr((void *)test_case, true, 0);
+ }
+ }
+}
+
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set)
+{
+ struct kunit_suite_set total_suite_set = {NULL, NULL};
+ struct kunit_suite **total_suite_start = NULL;
+ size_t init_num_suites, num_suites, suite_size;
+ int i = 0;
+
+ init_num_suites = init_suite_set.end - init_suite_set.start;
+ num_suites = suite_set.end - suite_set.start;
+ suite_size = sizeof(suite_set.start);
+
+ /* Allocate memory for array of all kunit suites */
+ total_suite_start = kmalloc_array(init_num_suites + num_suites, suite_size, GFP_KERNEL);
+ if (!total_suite_start)
+ return total_suite_set;
+
+ /* Append and mark init suites and then append all other kunit suites */
+ memcpy(total_suite_start, init_suite_set.start, init_num_suites * suite_size);
+ for (i = 0; i < init_num_suites; i++)
+ total_suite_start[i]->is_init = true;
+
+ memcpy(total_suite_start + init_num_suites, suite_set.start, num_suites * suite_size);
+
+ /* Set kunit suite set start and end */
+ total_suite_set.start = total_suite_start;
+ total_suite_set.end = total_suite_start + (init_num_suites + num_suites);
+
+ return total_suite_set;
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
+}
+
+int kunit_run_all_tests(void)
+{
+ struct kunit_suite_set suite_set = {NULL, NULL};
+ struct kunit_suite_set filtered_suite_set = {NULL, NULL};
+ struct kunit_suite_set init_suite_set = {
+ __kunit_init_suites_start, __kunit_init_suites_end,
+ };
+ struct kunit_suite_set normal_suite_set = {
+ __kunit_suites_start, __kunit_suites_end,
+ };
+ size_t init_num_suites = init_suite_set.end - init_suite_set.start;
+ int err = 0;
+
+ if (init_num_suites > 0) {
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ if (!suite_set.start)
+ goto out;
+ } else {
+ suite_set = normal_suite_set;
+ }
+
+ if (!kunit_enabled()) {
+ pr_info("kunit: disabled\n");
+ goto free_out;
+ }
+
+ if (filter_glob_param || filter_param) {
+ filtered_suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filter_param, filter_action_param, &err);
+
+ /* Free original suite set before using filtered suite set */
+ if (init_num_suites > 0)
+ kfree(suite_set.start);
+ suite_set = filtered_suite_set;
+
+ if (err) {
+ pr_err("kunit executor: error filtering suites: %d\n", err);
+ goto free_out;
+ }
+ }
+
+ if (!action_param)
+ kunit_exec_run_tests(&suite_set, true);
+ else if (strcmp(action_param, "list") == 0)
+ kunit_exec_list_tests(&suite_set, false);
+ else if (strcmp(action_param, "list_attr") == 0)
+ kunit_exec_list_tests(&suite_set, true);
+ else
+ pr_err("kunit executor: unknown action '%s'\n", action_param);
+
+free_out:
+ if (filter_glob_param || filter_param)
+ kunit_free_suite_set(suite_set);
+ else if (init_num_suites > 0)
+ /* Don't use kunit_free_suite_set because suites aren't individually allocated */
+ kfree(suite_set.start);
+
+out:
+ kunit_handle_shutdown();
+ return err;
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+#include "executor_test.c"
+#endif
+
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
new file mode 100644
index 000000000000..22d4ee86dbed
--- /dev/null
+++ b/lib/kunit/executor_test.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the KUnit executor.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+static void free_suite_set_at_end(struct kunit *test, const void *to_free);
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases);
+
+static void dummy_test(struct kunit *test) {}
+
+static struct kunit_case dummy_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "test1", .run_case = dummy_test },
+ { .name = "test2", .run_case = dummy_test },
+ {},
+};
+
+static void parse_filter_test(struct kunit *test)
+{
+ struct kunit_glob_filter filter = {NULL, NULL};
+
+ kunit_parse_glob_filter(&filter, "suite");
+ KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+ KUNIT_EXPECT_FALSE(test, filter.test_glob);
+ kfree(filter.suite_glob);
+ kfree(filter.test_glob);
+
+ kunit_parse_glob_filter(&filter, "suite.test");
+ KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+ KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
+ kfree(filter.suite_glob);
+ kfree(filter.test_glob);
+}
+
+static void filter_suites_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ /* Want: suite1, suite2, NULL -> suite2, NULL */
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+
+ /* Contains one element (end is 1 past end) */
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+}
+
+static void filter_suites_test_glob_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ /* Want: suite1, suite2, NULL -> suite2 (just test2), NULL */
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have test2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_suites_to_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void parse_filter_attr_test(struct kunit *test)
+{
+ int j, filter_count;
+ struct kunit_attr_filter *parsed_filters;
+ char filters[] = "speed>slow, module!=example", *filter = filters;
+ int err = 0;
+
+ filter_count = kunit_get_filter_count(filters);
+ KUNIT_EXPECT_EQ(test, filter_count, 2);
+
+ parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
+ GFP_KERNEL);
+ for (j = 0; j < filter_count; j++) {
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters);
+ }
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[0].input, ">slow");
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[1]), "module");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[1].input, "!=example");
+}
+
+static struct kunit_case dummy_attr_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "slow", .run_case = dummy_test, .module_name = "dummy",
+ .attr.speed = KUNIT_SPEED_SLOW },
+ { .name = "normal", .run_case = dummy_test, .module_name = "dummy" },
+ {},
+};
+
+static void filter_attr_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "speed>slow";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "slow_suite", dummy_attr_test_cases);
+ subsuite[1]->attr.speed = KUNIT_SPEED_SLOW; // Set suite attribute
+
+ /*
+ * Want: normal_suite(slow, normal), slow_suite(slow, normal),
+ * NULL -> normal_suite(normal), NULL
+ *
+ * The normal test in slow_suite is filtered out because its speed
+ * attribute is unset, so filtering falls back to the suite's speed
+ * attribute, which is slow.
+ */
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->name, "normal_suite");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have normal test case */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "normal");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_attr_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "module!=dummy";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
+
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void filter_attr_skip_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[2] = {NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[1],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "speed>slow";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
+
+ /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_ASSERT_EQ(test, kunit_suite_num_test_cases(got.start[0]), 2);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "slow");
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[1].name, "normal");
+
+ /* Now ensure slow is skipped and normal is not */
+ KUNIT_EXPECT_EQ(test, got.start[0]->test_cases[0].status, KUNIT_SKIPPED);
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].status);
+}
+
+static struct kunit_case executor_test_cases[] = {
+ KUNIT_CASE(parse_filter_test),
+ KUNIT_CASE(filter_suites_test),
+ KUNIT_CASE(filter_suites_test_glob_test),
+ KUNIT_CASE(filter_suites_to_empty_test),
+ KUNIT_CASE(parse_filter_attr_test),
+ KUNIT_CASE(filter_attr_test),
+ KUNIT_CASE(filter_attr_empty_test),
+ KUNIT_CASE(filter_attr_skip_test),
+ {}
+};
+
+static struct kunit_suite executor_test_suite = {
+ .name = "kunit_executor_test",
+ .test_cases = executor_test_cases,
+};
+
+kunit_test_suites(&executor_test_suite);
+
+/* Test helpers */
+
+static void free_suite_set(void *suite_set)
+{
+ kunit_free_suite_set(*(struct kunit_suite_set *)suite_set);
+ kfree(suite_set);
+}
+
+/* Use the resource API to register a call to free_suite_set.
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+static void free_suite_set_at_end(struct kunit *test, const void *to_free)
+{
+ struct kunit_suite_set *free;
+
+ if (!((struct kunit_suite_set *)to_free)->start)
+ return;
+
+ free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
+ *free = *(struct kunit_suite_set *)to_free;
+
+ kunit_add_action(test, free_suite_set, (void *)free);
+}
+
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases)
+{
+ struct kunit_suite *suite;
+
+ /* We normally never expect to allocate suites, hence the non-const cast. */
+ suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
+ strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+ suite->test_cases = test_cases;
+
+ return suite;
+}
diff --git a/lib/kunit/hooks-impl.h b/lib/kunit/hooks-impl.h
new file mode 100644
index 000000000000..4e71b2d0143b
--- /dev/null
+++ b/lib/kunit/hooks-impl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations for hook implementations.
+ *
+ * These will be set as the function pointers in struct kunit_hook_table,
+ * found in include/kunit/test-bug.h.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_HOOKS_IMPL_H
+#define _KUNIT_HOOKS_IMPL_H
+
+#include <kunit/test-bug.h>
+
+/* List of declarations. */
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file,
+ int line,
+ const char *fmt, ...);
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr);
+
+/* Code to set all of the function pointers. */
+static inline void kunit_install_hooks(void)
+{
+ /* Install the KUnit hook functions. */
+ kunit_hooks.fail_current_test = __kunit_fail_current_test_impl;
+ kunit_hooks.get_static_stub_address = __kunit_get_static_stub_address_impl;
+}
+
+#endif /* _KUNIT_HOOKS_IMPL_H */
diff --git a/lib/kunit/hooks.c b/lib/kunit/hooks.c
new file mode 100644
index 000000000000..365d98d4953c
--- /dev/null
+++ b/lib/kunit/hooks.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit 'Hooks' implementation.
+ *
+ * This file contains code / structures which should be built-in even when
+ * KUnit itself is built as a module.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+
+#include <kunit/test-bug.h>
+
+DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL(kunit_running);
+
+/* Function pointers for hooks. */
+struct kunit_hooks_table kunit_hooks;
+EXPORT_SYMBOL(kunit_hooks);
+
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
new file mode 100644
index 000000000000..798924f7cc86
--- /dev/null
+++ b/lib/kunit/kunit-example-test.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test to show how to use KUnit.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+
+/*
+ * This is the most fundamental element of KUnit, the test case. A test case
+ * makes a set EXPECTATIONs and ASSERTIONs about the behavior of some code; if
+ * any expectations or assertions are not met, the test fails; otherwise, the
+ * test passes.
+ *
+ * In KUnit, a test case is just a function with the signature
+ * `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
+ * information about the current test.
+ */
+static void example_simple_test(struct kunit *test)
+{
+ /*
+ * This is an EXPECTATION; it is how KUnit tests things. When you want
+ * to test a piece of code, you set some expectations about what the
+ * code should do. KUnit then runs the test and verifies that the code's
+ * behavior matched what was expected.
+ */
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * This is run once before each test case, see the comment on
+ * example_test_suite for more information.
+ */
+static int example_test_init(struct kunit *test)
+{
+ kunit_info(test, "initializing\n");
+
+ return 0;
+}
+
+/*
+ * This is run once after each test case, see the comment on
+ * example_test_suite for more information.
+ */
+static void example_test_exit(struct kunit *test)
+{
+ kunit_info(test, "cleaning up\n");
+}
+
+
+/*
+ * This is run once before all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static int example_test_init_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "initializing suite\n");
+
+ return 0;
+}
+
+/*
+ * This is run once after all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static void example_test_exit_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "exiting suite\n");
+}
+
+
+/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ KUNIT_FAIL(test, "You should not see this line.");
+}
+
+/*
+ * This test should always be marked skipped.
+ */
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_info(test, "You should see this line.");
+}
+
+/*
+ * This test shows off all the types of KUNIT_EXPECT macros.
+ */
+static void example_all_expect_macros_test(struct kunit *test)
+{
+ const u32 array1[] = { 0x0F, 0xFF };
+ const u32 array2[] = { 0x1F, 0xFF };
+
+ /* Boolean assertions */
+ KUNIT_EXPECT_TRUE(test, true);
+ KUNIT_EXPECT_FALSE(test, false);
+
+ /* Integer assertions */
+ KUNIT_EXPECT_EQ(test, 1, 1); /* check == */
+ KUNIT_EXPECT_GE(test, 1, 1); /* check >= */
+ KUNIT_EXPECT_LE(test, 1, 1); /* check <= */
+ KUNIT_EXPECT_NE(test, 1, 0); /* check != */
+ KUNIT_EXPECT_GT(test, 1, 0); /* check > */
+ KUNIT_EXPECT_LT(test, 0, 1); /* check < */
+
+ /* Pointer assertions */
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
+ KUNIT_EXPECT_PTR_NE(test, test, NULL);
+ KUNIT_EXPECT_NULL(test, NULL);
+ KUNIT_EXPECT_NOT_NULL(test, test);
+
+ /* String assertions */
+ KUNIT_EXPECT_STREQ(test, "hi", "hi");
+ KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
+
+ /* Memory block assertions */
+ KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1));
+ KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1));
+
+ /*
+ * There are also ASSERT variants of all of the above that abort test
+ * execution if they fail. Useful for memory allocations, etc.
+ */
+ KUNIT_ASSERT_GT(test, sizeof(char), 0);
+
+ /*
+ * There are also _MSG variants of all of the above that let you include
+ * additional text on failure.
+ */
+ KUNIT_EXPECT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+ KUNIT_ASSERT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+}
+
+/* This is a function we'll replace with static stubs. */
+static int add_one(int i)
+{
+ /* This will trigger the stub if active. */
+ KUNIT_STATIC_STUB_REDIRECT(add_one, i);
+
+ return i + 1;
+}
+
+/* This is used as a replacement for the above function. */
+static int subtract_one(int i)
+{
+ /* We don't need to trigger the stub from the replacement. */
+
+ return i - 1;
+}
+
+/*
+ * If the function to be replaced is static within a module it is
+ * useful to export a pointer to that function instead of having
+ * to change the static function to a non-static exported function.
+ *
+ * This pointer simulates a module exporting a pointer to a static
+ * function.
+ */
+static int (* const add_one_fn_ptr)(int i) = add_one;
+
+/*
+ * This test shows the use of static stubs.
+ */
+static void example_static_stub_test(struct kunit *test)
+{
+ /* By default, function is not stubbed. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+ /* Replace add_one() with subtract_one(). */
+ kunit_activate_static_stub(test, add_one, subtract_one);
+
+ /* add_one() is now replaced. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+ /* Return add_one() to normal. */
+ kunit_deactivate_static_stub(test, add_one);
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
+/*
+ * This test shows the use of static stubs when the function being
+ * replaced is provided as a pointer-to-function instead of the
+ * actual function. This is useful for providing access to static
+ * functions in a module by exporting a pointer to that function
+ * instead of having to change the static function to a non-static
+ * exported function.
+ */
+static void example_static_stub_using_fn_ptr_test(struct kunit *test)
+{
+ /* By default, function is not stubbed. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+ /* Replace add_one() with subtract_one(). */
+ kunit_activate_static_stub(test, add_one_fn_ptr, subtract_one);
+
+ /* add_one() is now replaced. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+ /* Return add_one() to normal. */
+ kunit_deactivate_static_stub(test, add_one_fn_ptr);
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
+static const struct example_param {
+ int value;
+} example_params_array[] = {
+ { .value = 3, },
+ { .value = 2, },
+ { .value = 1, },
+ { .value = 0, },
+};
+
+static void example_param_get_desc(const struct example_param *p, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "example value %d", p->value);
+}
+
+KUNIT_ARRAY_PARAM(example, example_params_array, example_param_get_desc);
+
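+/*
+ * KUNIT_ARRAY_PARAM generates a parameter generator named example_gen_params
+ * from the array and description helper above; it is wired up below via
+ * KUNIT_CASE_PARAM(example_params_test, example_gen_params).
+ */
+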
+/*
+ * This test shows the use of params.
+ */
+static void example_params_test(struct kunit *test)
+{
+ const struct example_param *param = test->param_value;
+
+ /* By design, param pointer will not be NULL */
+ KUNIT_ASSERT_NOT_NULL(test, param);
+
+ /* Test can be skipped on unsupported param values */
+ if (!is_power_of_2(param->value))
+ kunit_skip(test, "unsupported param value %d", param->value);
+
+ /* You can use param values for parameterized testing */
+ KUNIT_EXPECT_EQ(test, param->value % param->value, 0);
+}
+
+/*
+ * This test shows the use of test->priv.
+ */
+static void example_priv_test(struct kunit *test)
+{
+ /* unless set up in suite->init(), test->priv is NULL */
+ KUNIT_ASSERT_NULL(test, test->priv);
+
+ /* but can be used to pass arbitrary data to other functions */
+ test->priv = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, test->priv);
+ KUNIT_ASSERT_PTR_EQ(test, test->priv, kunit_get_current_test()->priv);
+}
+
+/*
+ * This test should always pass. Can be used to practice filtering attributes.
+ */
+static void example_slow_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * Here we make a list of all the test cases we want to add to the test suite
+ * below.
+ */
+static struct kunit_case example_test_cases[] = {
+ /*
+ * This is a helper to create a test case object from a test case
+ * function; its exact function is not important to understand how to
+ * use KUnit, just know that this is how you associate test cases with a
+ * test suite.
+ */
+ KUNIT_CASE(example_simple_test),
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
+ KUNIT_CASE(example_all_expect_macros_test),
+ KUNIT_CASE(example_static_stub_test),
+ KUNIT_CASE(example_static_stub_using_fn_ptr_test),
+ KUNIT_CASE(example_priv_test),
+ KUNIT_CASE_PARAM(example_params_test, example_gen_params),
+ KUNIT_CASE_SLOW(example_slow_test),
+ {}
+};
+
+/*
+ * This defines a suite or grouping of tests.
+ *
+ * Test cases are defined as belonging to the suite by adding them to
+ * the suite's `test_cases` array.
+ *
+ * Often it is desirable to run some function which will set up things which
+ * will be used by every test; this is accomplished with an `init` function
+ * which runs before each test case is invoked. Similarly, an `exit` function
+ * may be specified which runs after every test case and can be used for
+ * cleanup. For clarity, running tests in a test suite would behave as follows:
+ *
+ * suite.suite_init(suite);
+ * suite.init(test);
+ * suite.test_case[0](test);
+ * suite.exit(test);
+ * suite.init(test);
+ * suite.test_case[1](test);
+ * suite.exit(test);
+ * suite.suite_exit(suite);
+ * ...;
+ */
+static struct kunit_suite example_test_suite = {
+ .name = "example",
+ .init = example_test_init,
+ .exit = example_test_exit,
+ .suite_init = example_test_init_suite,
+ .suite_exit = example_test_exit_suite,
+ .test_cases = example_test_cases,
+};
+
+/*
+ * This registers the above test suite telling KUnit that this is a suite of
+ * tests that need to be run.
+ */
+kunit_test_suites(&example_test_suite);
+
+static int __init init_add(int x, int y)
+{
+ return (x + y);
+}
+
+/*
+ * This test should always pass. Can be used to test init suites.
+ */
+static void __init example_init_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, init_add(1, 1), 2);
+}
+
+/*
+ * The kunit_case struct cannot be marked as __initdata, as it will be
+ * used by debugfs to retrieve results after the test has run.
+ */
+static struct kunit_case __refdata example_init_test_cases[] = {
+ KUNIT_CASE(example_init_test),
+ {}
+};
+
+/*
+ * The kunit_suite struct cannot be marked as __initdata, as it will be
+ * used by debugfs to retrieve results after the test has run.
+ */
+static struct kunit_suite example_init_test_suite = {
+ .name = "example_init",
+ .test_cases = example_init_test_cases,
+};
+
+/*
+ * This registers the test suite and marks the suite as using init data
+ * and/or functions.
+ */
+kunit_test_init_section_suites(&example_init_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
new file mode 100644
index 000000000000..f7980ef236a3
--- /dev/null
+++ b/lib/kunit/kunit-test.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for core test infrastructure.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include "linux/gfp_types.h"
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include <linux/device.h>
+#include <kunit/device.h>
+
+#include "string-stream.h"
+#include "try-catch-impl.h"
+
+struct kunit_try_catch_test_context {
+ struct kunit_try_catch *try_catch;
+ bool function_called;
+};
+
+static void kunit_test_successful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_no_catch(void *data)
+{
+ struct kunit *test = data;
+
+ KUNIT_FAIL(test, "Catch should not be called\n");
+}
+
+static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_successful_try,
+ kunit_test_no_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static void kunit_test_unsuccessful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_throw(try_catch);
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_catch(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_unsuccessful_try,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static int kunit_try_catch_test_init(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->try_catch = kunit_kmalloc(test,
+ sizeof(*ctx->try_catch),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
+
+ return 0;
+}
+
+static struct kunit_case kunit_try_catch_test_cases[] = {
+ KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
+ KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
+ {}
+};
+
+static struct kunit_suite kunit_try_catch_test_suite = {
+ .name = "kunit-try-catch-test",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_try_catch_test_cases,
+};
+
+/*
+ * Context for testing test managed resources
+ * is_resource_initialized is used to test arbitrary resources
+ */
+struct kunit_test_resource_context {
+ struct kunit test;
+ bool is_resource_initialized;
+ int allocate_order[2];
+ int free_order[4];
+};
+
+static int fake_resource_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ res->data = &ctx->is_resource_initialized;
+ ctx->is_resource_initialized = true;
+ return 0;
+}
+
+static void fake_resource_free(struct kunit_resource *res)
+{
+ bool *is_resource_initialized = res->data;
+
+ *is_resource_initialized = false;
+}
+
+static void kunit_resource_test_init_resources(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_alloc_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
+ kunit_resource_free_t free = fake_resource_free;
+
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
+ KUNIT_EXPECT_PTR_EQ(test,
+ &ctx->is_resource_initialized,
+ (bool *)res->data);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
+ KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+
+ kunit_put_resource(res);
+}
+
+static inline bool kunit_resource_instance_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_data)
+{
+ return res->data == match_data;
+}
+
+/*
+ * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence
+ * they have a reference to the associated resource that they must release
+ * via kunit_put_resource(). In normal operation, users will only
+ * have to do this for cases where they use kunit_find_resource(), and the
+ * kunit_alloc_resource() function will be used (which does not take a
+ * resource reference).
+ */
+static void kunit_resource_test_destroy_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ kunit_put_resource(res);
+
+ KUNIT_ASSERT_FALSE(test,
+ kunit_destroy_resource(&ctx->test,
+ kunit_resource_instance_match,
+ res->data));
+
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_remove_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ /* The resource is in the list */
+ KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources));
+
+ /* Remove the resource. The pointer is still valid, but it can't be
+ * found.
+ */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* We haven't been freed yet. */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Removing the resource multiple times is valid. */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* Despite having been removed twice (from only one reference), the
+ * resource still has not been freed.
+ */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Free the resource. */
+ kunit_put_resource(res);
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+}
+
+static void kunit_resource_test_cleanup_resources(struct kunit *test)
+{
+ int i;
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *resources[5];
+
+ for (i = 0; i < ARRAY_SIZE(resources); i++) {
+ resources[i] = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+ kunit_put_resource(resources[i]);
+ }
+
+ kunit_cleanup(&ctx->test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_mark_order(int order_array[],
+ size_t order_size,
+ int key)
+{
+ int i;
+
+ for (i = 0; i < order_size && order_array[i]; i++)
+ ;
+
+ order_array[i] = key;
+}
+
+#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key) \
+ kunit_resource_test_mark_order(ctx->order_field, \
+ ARRAY_SIZE(ctx->order_field), \
+ key)
+
+static int fake_resource_2_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
+
+ res->data = ctx;
+
+ return 0;
+}
+
+static void fake_resource_2_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->data;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
+}
+
+static int fake_resource_1_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+ struct kunit_resource *res2;
+
+ res2 = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_2_init,
+ fake_resource_2_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
+
+ res->data = ctx;
+
+ kunit_put_resource(res2);
+
+ return 0;
+}
+
+static void fake_resource_1_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->data;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
+}
+
+/*
+ * TODO(brendanhiggins@google.com): replace the arrays that keep track of the
+ * order of allocation and freeing with strict mocks using the IN_SEQUENCE macro
+ * to assert allocation and freeing order when the feature becomes available.
+ */
+static void kunit_resource_test_proper_free_ordering(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
+
+ /* fake_resource_1 allocates a fake_resource_2 in its init. */
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_1_init,
+ fake_resource_1_free,
+ GFP_KERNEL,
+ ctx);
+
+ /*
+ * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
+ * before returning to fake_resource_1_init, it should be the first to
+ * put its key in the allocate_order array.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+
+ kunit_put_resource(res);
+
+ kunit_cleanup(&ctx->test);
+
+ /*
+ * Because fake_resource_2 finishes allocation before fake_resource_1,
+ * fake_resource_1 should be freed first since it could depend on
+ * fake_resource_2.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+}
+
+static void kunit_resource_test_static(struct kunit *test)
+{
+ struct kunit_test_resource_context ctx;
+ struct kunit_resource res;
+
+ KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx),
+ 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void kunit_resource_test_named(struct kunit *test)
+{
+ struct kunit_resource res1, res2, *found = NULL;
+ struct kunit_test_resource_context ctx;
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ 0);
+ KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ -EEXIST);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res2,
+ "resource_2", &ctx),
+ 0);
+
+ found = kunit_find_named_resource(test, "resource_1");
+
+ KUNIT_EXPECT_PTR_EQ(test, found, &res1);
+
+ if (found)
+ kunit_put_resource(&res1);
+
+ KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"),
+ 0);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void increment_int(void *ctx)
+{
+ int *i = (int *)ctx;
+
+ (*i)++;
+}
+
+static void kunit_resource_test_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Once we've cleaned up, the action queue is empty. */
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Check the same function can be deferred multiple times. */
+ kunit_add_action(test, increment_int, &num_actions);
+ kunit_add_action(test, increment_int, &num_actions);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 3);
+}
+
+static void kunit_resource_test_remove_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+
+ kunit_remove_action(test, increment_int, &num_actions);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+}
+
+static void kunit_resource_test_release_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+ /* Runs immediately on trigger. */
+ kunit_release_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Doesn't run again on test exit. */
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+}
+
+static void action_order_1(void *ctx)
+{
+ struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 1);
+ kunit_log(KERN_INFO, current->kunit_test, "action_order_1");
+}
+
+static void action_order_2(void *ctx)
+{
+ struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 2);
+ kunit_log(KERN_INFO, current->kunit_test, "action_order_2");
+}
+
+static void kunit_resource_test_action_ordering(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_add_action(test, action_order_1, ctx);
+ kunit_add_action(test, action_order_2, ctx);
+ kunit_add_action(test, action_order_1, ctx);
+ kunit_add_action(test, action_order_2, ctx);
+ kunit_remove_action(test, action_order_1, ctx);
+ kunit_release_action(test, action_order_2, ctx);
+ kunit_cleanup(test);
+
+ /*
+ * kunit_release_action() runs the last '2' immediately; cleanup then
+ * runs the remaining actions in reverse order: '2', then '1' (one '1'
+ * was removed above and never runs).
+ */
+ KUNIT_EXPECT_EQ(test, ctx->free_order[0], 2);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[2], 1);
+}
+
+static int kunit_resource_test_init(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx =
+ kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ test->priv = ctx;
+
+ kunit_init_test(&ctx->test, "test_test_context", NULL);
+
+ return 0;
+}
+
+static void kunit_resource_test_exit(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_cleanup(&ctx->test);
+ kfree(ctx);
+}
+
+static struct kunit_case kunit_resource_test_cases[] = {
+ KUNIT_CASE(kunit_resource_test_init_resources),
+ KUNIT_CASE(kunit_resource_test_alloc_resource),
+ KUNIT_CASE(kunit_resource_test_destroy_resource),
+ KUNIT_CASE(kunit_resource_test_remove_resource),
+ KUNIT_CASE(kunit_resource_test_cleanup_resources),
+ KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+ KUNIT_CASE(kunit_resource_test_static),
+ KUNIT_CASE(kunit_resource_test_named),
+ KUNIT_CASE(kunit_resource_test_action),
+ KUNIT_CASE(kunit_resource_test_remove_action),
+ KUNIT_CASE(kunit_resource_test_release_action),
+ KUNIT_CASE(kunit_resource_test_action_ordering),
+ {}
+};
+
+static struct kunit_suite kunit_resource_test_suite = {
+ .name = "kunit-resource-test",
+ .init = kunit_resource_test_init,
+ .exit = kunit_resource_test_exit,
+ .test_cases = kunit_resource_test_cases,
+};
+
+/*
+ * Log tests call string_stream functions, which aren't exported. So only
+ * build this code if this test is built-in.
+ */
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+
+/* This avoids a cast warning if kfree() is passed directly to kunit_add_action(). */
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+
+static void kunit_log_test(struct kunit *test)
+{
+ struct kunit_suite suite;
+#ifdef CONFIG_KUNIT_DEBUGFS
+ char *full_log;
+#endif
+ suite.log = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
+ string_stream_set_append_newlines(suite.log, true);
+
+ kunit_log(KERN_INFO, test, "put this in log.");
+ kunit_log(KERN_INFO, test, "this too.");
+ kunit_log(KERN_INFO, &suite, "add to suite log.");
+ kunit_log(KERN_INFO, &suite, "along with this.");
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_TRUE(test, test->log->append_newlines);
+
+ full_log = string_stream_get_string(test->log);
+ kunit_add_action(test, kfree_wrapper, full_log);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(full_log, "put this in log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(full_log, "this too."));
+
+ full_log = string_stream_get_string(suite.log);
+ kunit_add_action(test, kfree_wrapper, full_log);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(full_log, "add to suite log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(full_log, "along with this."));
+#else
+ KUNIT_EXPECT_NULL(test, test->log);
+#endif
+}
+
+static void kunit_log_newline_test(struct kunit *test)
+{
+ char *full_log;
+
+ kunit_info(test, "Add newline\n");
+ if (test->log) {
+ full_log = string_stream_get_string(test->log);
+ kunit_add_action(test, kfree_wrapper, full_log);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(full_log, "Add newline\n"),
+ "Missing log line, full log:\n%s", full_log);
+ KUNIT_EXPECT_NULL(test, strstr(full_log, "Add newline\n\n"));
+ } else {
+ kunit_skip(test, "only useful when debugfs is enabled");
+ }
+}
+#else
+static void kunit_log_test(struct kunit *test)
+{
+ kunit_skip(test, "Log tests only run when built-in");
+}
+
+static void kunit_log_newline_test(struct kunit *test)
+{
+ kunit_skip(test, "Log tests only run when built-in");
+}
+#endif /* IS_BUILTIN(CONFIG_KUNIT_TEST) */
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ KUNIT_CASE(kunit_log_newline_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
+static void kunit_current_test(struct kunit *test)
+{
+ /*
+ * Check that both current->kunit_test and kunit_get_current_test()
+ * return the currently running test.
+ */
+ KUNIT_EXPECT_PTR_EQ(test, test, current->kunit_test);
+ KUNIT_EXPECT_PTR_EQ(test, test, kunit_get_current_test());
+}
+
+static void kunit_current_fail_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ /* Set current->kunit_test to fake test. */
+ current->kunit_test = &fake;
+
+ kunit_fail_current_test("This should make `fake` test fail.");
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+ kunit_cleanup(&fake);
+
+ /* Reset current->kunit_test to current test. */
+ current->kunit_test = test;
+}
+
+static struct kunit_case kunit_current_test_cases[] = {
+ KUNIT_CASE(kunit_current_test),
+ KUNIT_CASE(kunit_current_fail_test),
+ {}
+};
+
+static void test_dev_action(void *priv)
+{
+ *(void **)priv = (void *)1;
+}
+
+static void kunit_device_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+static void kunit_device_cleanup_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ /* Add an action to verify cleanup. */
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ /* Force KUnit to run cleanup early. */
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+struct driver_test_state {
+ bool driver_device_probed;
+ bool driver_device_removed;
+ long action_was_run;
+};
+
+static int driver_probe_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_probed = true;
+ return 0;
+}
+
+static int driver_remove_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_removed = true;
+ return 0;
+}
+
+static void kunit_device_driver_test(struct kunit *test)
+{
+ struct device_driver *test_driver;
+ struct device *test_device;
+ struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL);
+
+ test->priv = test_state;
+ test_driver = kunit_driver_create(test, "my_driver");
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_driver);
+
+ test_driver->probe = driver_probe_hook;
+ test_driver->remove = driver_remove_hook;
+
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Make sure the probe function was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &test_state->action_was_run);
+
+ KUNIT_EXPECT_EQ(test, test_state->action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+ test_device = NULL;
+
+ // Make sure the remove hook was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_removed);
+
+ // We're going to test this again.
+ test_state->driver_device_probed = false;
+
+ // The driver should not automatically be destroyed by
+ // kunit_device_unregister, so we can re-use it.
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Probe was called again.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Everything is automatically freed here.
+}
+
+static struct kunit_case kunit_device_test_cases[] = {
+ KUNIT_CASE(kunit_device_test),
+ KUNIT_CASE(kunit_device_cleanup_test),
+ KUNIT_CASE(kunit_device_driver_test),
+ {}
+};
+
+static struct kunit_suite kunit_device_test_suite = {
+ .name = "kunit_device",
+ .test_cases = kunit_device_test_cases,
+};
+
+static struct kunit_suite kunit_current_test_suite = {
+ .name = "kunit_current",
+ .test_cases = kunit_current_test_cases,
+};
+
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
+ &kunit_log_test_suite, &kunit_status_test_suite,
+ &kunit_current_test_suite, &kunit_device_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c
new file mode 100644
index 000000000000..f0209252b179
--- /dev/null
+++ b/lib/kunit/resource.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kref.h>
+
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in. Doesn't initialize res->should_kfree.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ res->free = free;
+ kref_init(&res->refcount);
+
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
+
+ spin_lock_irqsave(&test->lock, flags);
+ list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kunit_add_resource);
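+
+/*
+ * Illustrative sketch, not part of this change: a statically allocated
+ * resource can be registered through the kunit_add_resource() wrapper with a
+ * NULL init and free, as kunit_resource_test_static() in kunit-test.c does.
+ * The variable names below are hypothetical:
+ *
+ *	static struct kunit_resource res;
+ *	static int data;
+ *
+ *	kunit_add_resource(test, NULL, NULL, &res, &data);
+ *
+ * res.data then points at &data; since free is NULL, nothing is called back
+ * when kunit_cleanup() later drops the resource from the test's list.
+ */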
+
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
+{
+ unsigned long flags;
+ bool was_linked;
+
+ spin_lock_irqsave(&test->lock, flags);
+ was_linked = !list_empty(&res->node);
+ list_del_init(&res->node);
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ if (was_linked)
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
+
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
+
+ if (!res)
+ return -ENOENT;
+
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
+
+struct kunit_action_ctx {
+ struct kunit_resource res;
+ kunit_action_t *func;
+ void *ctx;
+};
+
+static void __kunit_action_free(struct kunit_resource *res)
+{
+ struct kunit_action_ctx *action_ctx = container_of(res, struct kunit_action_ctx, res);
+
+ action_ctx->func(action_ctx->ctx);
+}
+
+int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx)
+{
+ struct kunit_action_ctx *action_ctx;
+
+ KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to add a NULL action function!");
+
+ action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL);
+ if (!action_ctx)
+ return -ENOMEM;
+
+ action_ctx->func = action;
+ action_ctx->ctx = ctx;
+
+ action_ctx->res.should_kfree = true;
+ /* As init is NULL, this cannot fail. */
+ __kunit_add_resource(test, NULL, __kunit_action_free, &action_ctx->res, action_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_add_action);
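+
+/*
+ * Illustrative sketch, not part of this change: deferring a kfree() with an
+ * action, using a wrapper that matches the kunit_action_t signature (as the
+ * tests in this series do). "buf" and its size are hypothetical:
+ *
+ *	static void kfree_wrapper(void *p) { kfree(p); }
+ *	...
+ *	buf = kmalloc(32, GFP_KERNEL);
+ *	kunit_add_action(test, kfree_wrapper, buf);
+ *
+ * buf is then freed automatically when the test exits via kunit_cleanup(),
+ * in reverse order of registration relative to other deferred actions.
+ */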
+
+int kunit_add_action_or_reset(struct kunit *test, void (*action)(void *),
+ void *ctx)
+{
+ int res = kunit_add_action(test, action, ctx);
+
+ if (res)
+ action(ctx);
+ return res;
+}
+EXPORT_SYMBOL_GPL(kunit_add_action_or_reset);
+
+static bool __kunit_action_match(struct kunit *test,
+ struct kunit_resource *res, void *match_data)
+{
+ struct kunit_action_ctx *match_ctx = (struct kunit_action_ctx *)match_data;
+ struct kunit_action_ctx *res_ctx = container_of(res, struct kunit_action_ctx, res);
+
+ /* Make sure this resource was added as an action (it uses our free function). */
+ if (res->free != __kunit_action_free)
+ return false;
+
+ /* Both the function and context data should match. */
+ return (match_ctx->func == res_ctx->func) && (match_ctx->ctx == res_ctx->ctx);
+}
+
+void kunit_remove_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx)
+{
+ struct kunit_action_ctx match_ctx;
+ struct kunit_resource *res;
+
+ match_ctx.func = action;
+ match_ctx.ctx = ctx;
+
+ res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
+ if (res) {
+ /* Remove the free function so we don't run the action. */
+ res->free = NULL;
+ kunit_remove_resource(test, res);
+ kunit_put_resource(res);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_remove_action);
+
+void kunit_release_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx)
+{
+ struct kunit_action_ctx match_ctx;
+ struct kunit_resource *res;
+
+ match_ctx.func = action;
+ match_ctx.ctx = ctx;
+
+ res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
+ if (res) {
+ kunit_remove_resource(test, res);
+ /* We have to put() this here, else free won't be called. */
+ kunit_put_resource(res);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_release_action);
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
new file mode 100644
index 000000000000..92b2cccd5e76
--- /dev/null
+++ b/lib/kunit/static_stub.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include "hooks-impl.h"
+
+
+/* Context for a static stub. This is stored in the resource data. */
+struct kunit_static_stub_ctx {
+ void *real_fn_addr;
+ void *replacement_addr;
+};
+
+static void __kunit_static_stub_resource_free(struct kunit_resource *res)
+{
+ kfree(res->data);
+}
+
+/* Matching function for kunit_find_resource(). match_data is real_fn_addr. */
+static bool __kunit_static_stub_resource_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_real_fn_addr)
+{
+ /* This pointer is only valid if res is a static stub resource. */
+ struct kunit_static_stub_ctx *ctx = res->data;
+
+ /* Make sure the resource is a static stub resource. */
+ if (res->free != &__kunit_static_stub_resource_free)
+ return false;
+
+ return ctx->real_fn_addr == match_real_fn_addr;
+}
+
+/* Hook to return the address of the replacement function. */
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr)
+{
+ struct kunit_resource *res;
+ struct kunit_static_stub_ctx *ctx;
+ void *replacement_addr;
+
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+
+ if (!res)
+ return NULL;
+
+ ctx = res->data;
+ replacement_addr = ctx->replacement_addr;
+ kunit_put_resource(res);
+ return replacement_addr;
+}
+
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr)
+{
+ struct kunit_resource *res;
+
+ KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+ "Tried to deactivate a NULL stub.");
+
+ /* Look up the existing stub for this function. */
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+
+ /* Error out if the stub doesn't exist. */
+ KUNIT_ASSERT_PTR_NE_MSG(test, res, NULL,
+ "Tried to deactivate a nonexistent stub.");
+
+ /*
+ * Free the stub. The resource is put twice: once when it is removed
+ * from the list and once for the reference kunit_find_resource() took.
+ */
+ kunit_remove_resource(test, res);
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_deactivate_static_stub);
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use it instead.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+ void *real_fn_addr,
+ void *replacement_addr)
+{
+ struct kunit_static_stub_ctx *ctx;
+ struct kunit_resource *res;
+
+ KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+ "Tried to activate a stub for function NULL");
+
+ /* If the replacement address is NULL, deactivate the stub. */
+ if (!replacement_addr) {
+ kunit_deactivate_static_stub(test, real_fn_addr);
+ return;
+ }
+
+ /* Look up any existing stubs for this function, and replace them. */
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+ if (res) {
+ ctx = res->data;
+ ctx->replacement_addr = replacement_addr;
+
+ /* We got an extra reference from find_resource(), so put it. */
+ kunit_put_resource(res);
+ } else {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ ctx->real_fn_addr = real_fn_addr;
+ ctx->replacement_addr = replacement_addr;
+ res = kunit_alloc_resource(test, NULL,
+ &__kunit_static_stub_resource_free,
+ GFP_KERNEL, ctx);
+ }
+}
+EXPORT_SYMBOL_GPL(__kunit_activate_static_stub);
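+
+/*
+ * Illustrative sketch, not part of this change, mirroring the usage in
+ * string-stream-test.c from this series; the function and type names are
+ * hypothetical. A function under test opts in to redirection with:
+ *
+ *	void real_func(struct foo *f)
+ *	{
+ *		KUNIT_STATIC_STUB_REDIRECT(real_func, f);
+ *		...
+ *	}
+ *
+ * and a test installs a replacement with the same signature via:
+ *
+ *	kunit_activate_static_stub(test, real_func, replacement_func);
+ *
+ * The redirection is dropped automatically at test cleanup, or explicitly
+ * with kunit_deactivate_static_stub(test, real_func).
+ */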
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
new file mode 100644
index 000000000000..03fb511826f7
--- /dev/null
+++ b/lib/kunit/string-stream-test.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for struct string_stream.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "string-stream.h"
+
+struct string_stream_test_priv {
+ /* For testing resource-managed free. */
+ struct string_stream *expected_free_stream;
+ bool stream_was_freed;
+ bool stream_free_again;
+};
+
+/* Avoids a cast warning if kfree() is passed directly to kunit_add_action(). */
+static void kfree_wrapper(void *p)
+{
+ kfree(p);
+}
+
+/* Avoids a cast warning if string_stream_destroy() is passed directly to kunit_add_action(). */
+static void cleanup_raw_stream(void *p)
+{
+ struct string_stream *stream = p;
+
+ string_stream_destroy(stream);
+}
+
+static char *get_concatenated_string(struct kunit *test, struct string_stream *stream)
+{
+ char *str = string_stream_get_string(stream);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ kunit_add_action(test, kfree_wrapper, (void *)str);
+
+ return str;
+}
+
+/* Managed string_stream object is initialized correctly. */
+static void string_stream_managed_init_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ /* Resource-managed initialization. */
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ KUNIT_EXPECT_EQ(test, stream->length, 0);
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+ KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL));
+ KUNIT_EXPECT_FALSE(test, stream->append_newlines);
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+/* Unmanaged string_stream object is initialized correctly. */
+static void string_stream_unmanaged_init_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = alloc_string_stream(GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ kunit_add_action(test, cleanup_raw_stream, stream);
+
+ KUNIT_EXPECT_EQ(test, stream->length, 0);
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+ KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL));
+ KUNIT_EXPECT_FALSE(test, stream->append_newlines);
+
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_destroy_stub(struct string_stream *stream)
+{
+ struct kunit *fake_test = kunit_get_current_test();
+ struct string_stream_test_priv *priv = fake_test->priv;
+
+ /* The test could own string_streams other than the one we are testing. */
+ if (stream == priv->expected_free_stream) {
+ if (priv->stream_was_freed)
+ priv->stream_free_again = true;
+ else
+ priv->stream_was_freed = true;
+ }
+
+ /*
+ * Do not call string_stream_destroy() here: the redirection stub is
+ * still active, so it would just re-enter this function. Also avoid
+ * calling kunit_deactivate_static_stub() or changing
+ * current->kunit_test during cleanup.
+ */
+ string_stream_clear(stream);
+ kfree(stream);
+}
+
+/* kunit_free_string_stream() calls string_stream_destroy(). */
+static void string_stream_managed_free_test(struct kunit *test)
+{
+ struct string_stream_test_priv *priv = test->priv;
+
+ priv->expected_free_stream = NULL;
+ priv->stream_was_freed = false;
+ priv->stream_free_again = false;
+
+ kunit_activate_static_stub(test,
+ string_stream_destroy,
+ string_stream_destroy_stub);
+
+ priv->expected_free_stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->expected_free_stream);
+
+ /* This should call the stub function. */
+ kunit_free_string_stream(test, priv->expected_free_stream);
+
+ KUNIT_EXPECT_TRUE(test, priv->stream_was_freed);
+ KUNIT_EXPECT_FALSE(test, priv->stream_free_again);
+}
+
+/* string_stream object is freed when test is cleaned up. */
+static void string_stream_resource_free_test(struct kunit *test)
+{
+ struct string_stream_test_priv *priv = test->priv;
+ struct kunit *fake_test;
+
+ fake_test = kunit_kzalloc(test, sizeof(*fake_test), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_test);
+
+ kunit_init_test(fake_test, "string_stream_fake_test", NULL);
+ fake_test->priv = priv;
+
+ /*
+ * Activate stub before creating string_stream so the
+ * string_stream will be cleaned up first.
+ */
+ priv->expected_free_stream = NULL;
+ priv->stream_was_freed = false;
+ priv->stream_free_again = false;
+
+ kunit_activate_static_stub(fake_test,
+ string_stream_destroy,
+ string_stream_destroy_stub);
+
+ priv->expected_free_stream = kunit_alloc_string_stream(fake_test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->expected_free_stream);
+
+ /* Set current->kunit_test to fake_test so the static stub will be called. */
+ current->kunit_test = fake_test;
+
+ /* Cleanup test - the stub function should be called */
+ kunit_cleanup(fake_test);
+
+ /* Set current->kunit_test back to current test. */
+ current->kunit_test = test;
+
+ KUNIT_EXPECT_TRUE(test, priv->stream_was_freed);
+ KUNIT_EXPECT_FALSE(test, priv->stream_free_again);
+}
+
+/*
+ * Add a series of lines to a string_stream. Check that all lines
+ * appear in the correct order and no characters are dropped.
+ */
+static void string_stream_line_add_test(struct kunit *test)
+{
+ struct string_stream *stream;
+ char line[60];
+ char *concat_string, *pos, *string_end;
+ size_t len, total_len;
+ int num_lines, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Add series of sequence numbered lines */
+ total_len = 0;
+ for (i = 0; i < 100; ++i) {
+ len = snprintf(line, sizeof(line),
+ "The quick brown fox jumps over the lazy penguin %d\n", i);
+
+ /* Sanity-check that our test string isn't truncated */
+ KUNIT_ASSERT_LT(test, len, sizeof(line));
+
+ string_stream_add(stream, line);
+ total_len += len;
+ }
+ num_lines = i;
+
+ concat_string = get_concatenated_string(test, stream);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, concat_string);
+ KUNIT_EXPECT_EQ(test, strlen(concat_string), total_len);
+
+ /*
+ * Split the concatenated string at the newlines and check that
+ * all the original added strings are present.
+ */
+ pos = concat_string;
+ for (i = 0; i < num_lines; ++i) {
+ string_end = strchr(pos, '\n');
+ KUNIT_EXPECT_NOT_NULL(test, string_end);
+
+ /* Convert to NULL-terminated string */
+ *string_end = '\0';
+
+ snprintf(line, sizeof(line),
+ "The quick brown fox jumps over the lazy penguin %d", i);
+ KUNIT_EXPECT_STREQ(test, pos, line);
+
+ pos = string_end + 1;
+ }
+
+ /* There shouldn't be any more data after this */
+ KUNIT_EXPECT_EQ(test, strlen(pos), 0);
+}
+
+/* Add a series of lines of variable length to a string_stream. */
+static void string_stream_variable_length_line_test(struct kunit *test)
+{
+ static const char line[] =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ " 0123456789!$%^&*()_-+={}[]:;@'~#<>,.?/|";
+ struct string_stream *stream;
+ struct rnd_state rnd;
+ char *concat_string, *pos, *string_end;
+ size_t offset, total_len;
+ int num_lines, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /*
+ * Log many lines of varying lengths until we have created
+ * many fragments.
+ * The "randomness" must be repeatable.
+ */
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ total_len = 0;
+ for (i = 0; i < 100; ++i) {
+ offset = prandom_u32_state(&rnd) % (sizeof(line) - 1);
+ string_stream_add(stream, "%s\n", &line[offset]);
+ total_len += sizeof(line) - offset;
+ }
+ num_lines = i;
+
+ concat_string = get_concatenated_string(test, stream);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, concat_string);
+ KUNIT_EXPECT_EQ(test, strlen(concat_string), total_len);
+
+ /*
+ * Split the concatenated string at the newlines and check that
+ * all the original added strings are present.
+ */
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ pos = concat_string;
+ for (i = 0; i < num_lines; ++i) {
+ string_end = strchr(pos, '\n');
+ KUNIT_EXPECT_NOT_NULL(test, string_end);
+
+ /* Convert to NULL-terminated string */
+ *string_end = '\0';
+
+ offset = prandom_u32_state(&rnd) % (sizeof(line) - 1);
+ KUNIT_EXPECT_STREQ(test, pos, &line[offset]);
+
+ pos = string_end + 1;
+ }
+
+ /* There shouldn't be any more data after this */
+ KUNIT_EXPECT_EQ(test, strlen(pos), 0);
+}
+
+/* Appending the content of one string stream to another. */
+static void string_stream_append_test(struct kunit *test)
+{
+ static const char * const strings_1[] = {
+ "one", "two", "three", "four", "five", "six",
+ "seven", "eight", "nine", "ten",
+ };
+ static const char * const strings_2[] = {
+ "Apple", "Pear", "Orange", "Banana", "Grape", "Apricot",
+ };
+ struct string_stream *stream_1, *stream_2;
+ const char *stream1_content_before_append, *stream_2_content;
+ char *combined_content;
+ size_t combined_length;
+ int i;
+
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /* Append content of empty stream to empty stream */
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_EQ(test, strlen(get_concatenated_string(test, stream_1)), 0);
+
+ /* Add some data to stream_1 */
+ for (i = 0; i < ARRAY_SIZE(strings_1); ++i)
+ string_stream_add(stream_1, "%s\n", strings_1[i]);
+
+ stream1_content_before_append = get_concatenated_string(test, stream_1);
+
+ /* Append content of empty stream to non-empty stream */
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ stream1_content_before_append);
+
+ /* Add some data to stream_2 */
+ for (i = 0; i < ARRAY_SIZE(strings_2); ++i)
+ string_stream_add(stream_2, "%s\n", strings_2[i]);
+
+ /* Append content of non-empty stream to non-empty stream */
+ string_stream_append(stream_1, stream_2);
+
+ /*
+ * End result should be the original content of stream_1 plus
+ * the content of stream_2.
+ */
+ stream_2_content = get_concatenated_string(test, stream_2);
+ combined_length = strlen(stream1_content_before_append) + strlen(stream_2_content);
+ combined_length++; /* for terminating \0 */
+ combined_content = kunit_kmalloc(test, combined_length, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, combined_content);
+ snprintf(combined_content, combined_length, "%s%s",
+ stream1_content_before_append, stream_2_content);
+
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1), combined_content);
+
+ /* Append content of non-empty stream to empty stream */
+ kunit_free_string_stream(test, stream_1);
+
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1), stream_2_content);
+}
+
+/* Appending the content of one string stream to one with auto-newlining. */
+static void string_stream_append_auto_newline_test(struct kunit *test)
+{
+ struct string_stream *stream_1, *stream_2;
+
+ /* Stream 1 has newline appending enabled */
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+ string_stream_set_append_newlines(stream_1, true);
+ KUNIT_EXPECT_TRUE(test, stream_1->append_newlines);
+
+ /* Stream 2 does not append newlines */
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /* Appending a stream with a newline should not add another newline */
+ string_stream_add(stream_1, "Original string\n");
+ string_stream_add(stream_2, "Appended content\n");
+ string_stream_add(stream_2, "More stuff\n");
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ "Original string\nAppended content\nMore stuff\n");
+
+ kunit_free_string_stream(test, stream_2);
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /*
+ * Appending a stream without newline should add a final newline.
+ * The appended string_stream is treated as a single string so newlines
+ * should not be inserted between fragments.
+ */
+ string_stream_add(stream_2, "Another");
+ string_stream_add(stream_2, "And again");
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ "Original string\nAppended content\nMore stuff\nAnotherAnd again\n");
+}
+
+/* Adding an empty string should not create a fragment. */
+static void string_stream_append_empty_string_test(struct kunit *test)
+{
+ struct string_stream *stream;
+ int original_frag_count;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Formatted empty string */
+ string_stream_add(stream, "%s", "");
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+
+ /* Adding an empty string to a non-empty stream */
+ string_stream_add(stream, "Add this line");
+ original_frag_count = list_count_nodes(&stream->fragments);
+
+ string_stream_add(stream, "%s", "");
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&stream->fragments), original_frag_count);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream), "Add this line");
+}
+
+/* Adding strings without automatic newline appending */
+static void string_stream_no_auto_newline_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /*
+ * Add some strings with and without newlines. All formatted newlines
+ * should be preserved. It should not add any extra newlines.
+ */
+ string_stream_add(stream, "One");
+ string_stream_add(stream, "Two\n");
+ string_stream_add(stream, "%s\n", "Three");
+ string_stream_add(stream, "%s", "Four\n");
+ string_stream_add(stream, "Five\n%s", "Six");
+ string_stream_add(stream, "Seven\n\n");
+ string_stream_add(stream, "Eight");
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream),
+ "OneTwo\nThree\nFour\nFive\nSixSeven\n\nEight");
+}
+
+/* Adding strings with automatic newline appending */
+static void string_stream_auto_newline_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ string_stream_set_append_newlines(stream, true);
+ KUNIT_EXPECT_TRUE(test, stream->append_newlines);
+
+ /*
+ * Add some strings with and without newlines. Newlines should
+ * be appended to lines that do not end with \n, but newlines
+ * resulting from the formatting should not be changed.
+ */
+ string_stream_add(stream, "One");
+ string_stream_add(stream, "Two\n");
+ string_stream_add(stream, "%s\n", "Three");
+ string_stream_add(stream, "%s", "Four\n");
+ string_stream_add(stream, "Five\n%s", "Six");
+ string_stream_add(stream, "Seven\n\n");
+ string_stream_add(stream, "Eight");
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream),
+ "One\nTwo\nThree\nFour\nFive\nSix\nSeven\n\nEight\n");
+}
+
+/*
+ * This doesn't actually "test" anything. It reports time taken
+ * and memory used for logging a large number of lines.
+ */
+static void string_stream_performance_test(struct kunit *test)
+{
+ struct string_stream_fragment *frag_container;
+ struct string_stream *stream;
+ char test_line[101];
+ ktime_t start_time, end_time;
+ size_t len, bytes_requested, actual_bytes_used, total_string_length;
+ int offset, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ memset(test_line, 'x', sizeof(test_line) - 1);
+ test_line[sizeof(test_line) - 1] = '\0';
+
+ start_time = ktime_get();
+ for (i = 0; i < 10000; i++) {
+ offset = i % (sizeof(test_line) - 1);
+ string_stream_add(stream, "%s: %d\n", &test_line[offset], i);
+ }
+ end_time = ktime_get();
+
+ /*
+ * Calculate memory used. This doesn't include invisible
+ * overhead due to kernel allocator fragment size rounding.
+ */
+ bytes_requested = sizeof(*stream);
+ actual_bytes_used = ksize(stream);
+ total_string_length = 0;
+
+ list_for_each_entry(frag_container, &stream->fragments, node) {
+ bytes_requested += sizeof(*frag_container);
+ actual_bytes_used += ksize(frag_container);
+
+ len = strlen(frag_container->fragment);
+ total_string_length += len;
+ bytes_requested += len + 1; /* +1 for '\0' */
+ actual_bytes_used += ksize(frag_container->fragment);
+ }
+
+ kunit_info(test, "Time elapsed: %lld us\n",
+ ktime_us_delta(end_time, start_time));
+ kunit_info(test, "Total string length: %zu\n", total_string_length);
+ kunit_info(test, "Bytes requested: %zu\n", bytes_requested);
+ kunit_info(test, "Actual bytes allocated: %zu\n", actual_bytes_used);
+}
+
+static int string_stream_test_init(struct kunit *test)
+{
+ struct string_stream_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static struct kunit_case string_stream_test_cases[] = {
+ KUNIT_CASE(string_stream_managed_init_test),
+ KUNIT_CASE(string_stream_unmanaged_init_test),
+ KUNIT_CASE(string_stream_managed_free_test),
+ KUNIT_CASE(string_stream_resource_free_test),
+ KUNIT_CASE(string_stream_line_add_test),
+ KUNIT_CASE(string_stream_variable_length_line_test),
+ KUNIT_CASE(string_stream_append_test),
+ KUNIT_CASE(string_stream_append_auto_newline_test),
+ KUNIT_CASE(string_stream_append_empty_string_test),
+ KUNIT_CASE(string_stream_no_auto_newline_test),
+ KUNIT_CASE(string_stream_auto_newline_test),
+ KUNIT_CASE(string_stream_performance_test),
+ {}
+};
+
+static struct kunit_suite string_stream_test_suite = {
+ .name = "string-stream-test",
+ .test_cases = string_stream_test_cases,
+ .init = string_stream_test_init,
+};
+kunit_test_suites(&string_stream_test_suite);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
new file mode 100644
index 000000000000..54f4fdcbfac8
--- /dev/null
+++ b/lib/kunit/string-stream.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "string-stream.h"
+
+
+static struct string_stream_fragment *alloc_string_stream_fragment(int len, gfp_t gfp)
+{
+ struct string_stream_fragment *frag;
+
+ frag = kzalloc(sizeof(*frag), gfp);
+ if (!frag)
+ return ERR_PTR(-ENOMEM);
+
+ frag->fragment = kmalloc(len, gfp);
+ if (!frag->fragment) {
+ kfree(frag);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return frag;
+}
+
+static void string_stream_fragment_destroy(struct string_stream_fragment *frag)
+{
+ list_del(&frag->node);
+ kfree(frag->fragment);
+ kfree(frag);
+}
+
+int string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args)
+{
+ struct string_stream_fragment *frag_container;
+ int buf_len, result_len;
+ va_list args_for_counting;
+
+ /* Make a copy because `vsnprintf` could change it */
+ va_copy(args_for_counting, args);
+
+ /* Evaluate length of formatted string */
+ buf_len = vsnprintf(NULL, 0, fmt, args_for_counting);
+
+ va_end(args_for_counting);
+
+ if (buf_len == 0)
+ return 0;
+
+ /* Reserve one extra for possible appended newline. */
+ if (stream->append_newlines)
+ buf_len++;
+
+ /* Need space for null byte. */
+ buf_len++;
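+
+ /*
+ * Worked example (illustrative): a formatted result of "abc" with
+ * append_newlines enabled gives buf_len = 3 + 1 + 1: three characters,
+ * one byte reserved for a possible appended '\n', and one for the
+ * trailing '\0'.
+ */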
+
+ frag_container = alloc_string_stream_fragment(buf_len, stream->gfp);
+ if (IS_ERR(frag_container))
+ return PTR_ERR(frag_container);
+
+ if (stream->append_newlines) {
+ /* Don't include reserved newline byte in writeable length. */
+ result_len = vsnprintf(frag_container->fragment, buf_len - 1, fmt, args);
+
+ /* Append newline if necessary. */
+ if (frag_container->fragment[result_len - 1] != '\n')
+ result_len = strlcat(frag_container->fragment, "\n", buf_len);
+ } else {
+ result_len = vsnprintf(frag_container->fragment, buf_len, fmt, args);
+ }
+
+ spin_lock(&stream->lock);
+ stream->length += result_len;
+ list_add_tail(&frag_container->node, &stream->fragments);
+ spin_unlock(&stream->lock);
+
+ return 0;
+}
+
+int string_stream_add(struct string_stream *stream, const char *fmt, ...)
+{
+ va_list args;
+ int result;
+
+ va_start(args, fmt);
+ result = string_stream_vadd(stream, fmt, args);
+ va_end(args);
+
+ return result;
+}
+
+void string_stream_clear(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container, *frag_container_safe;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry_safe(frag_container,
+ frag_container_safe,
+ &stream->fragments,
+ node) {
+ string_stream_fragment_destroy(frag_container);
+ }
+ stream->length = 0;
+ spin_unlock(&stream->lock);
+}
+
+char *string_stream_get_string(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container;
+ size_t buf_len = stream->length + 1; /* +1 for null byte. */
+ char *buf;
+
+ buf = kzalloc(buf_len, stream->gfp);
+ if (!buf)
+ return NULL;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry(frag_container, &stream->fragments, node)
+ strlcat(buf, frag_container->fragment, buf_len);
+ spin_unlock(&stream->lock);
+
+ return buf;
+}
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other)
+{
+ const char *other_content;
+ int ret;
+
+ other_content = string_stream_get_string(other);
+
+ if (!other_content)
+ return -ENOMEM;
+
+ ret = string_stream_add(stream, other_content);
+ kfree(other_content);
+
+ return ret;
+}
+
+bool string_stream_is_empty(struct string_stream *stream)
+{
+ return list_empty(&stream->fragments);
+}
+
+struct string_stream *alloc_string_stream(gfp_t gfp)
+{
+ struct string_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), gfp);
+ if (!stream)
+ return ERR_PTR(-ENOMEM);
+
+ stream->gfp = gfp;
+ INIT_LIST_HEAD(&stream->fragments);
+ spin_lock_init(&stream->lock);
+
+ return stream;
+}
+
+void string_stream_destroy(struct string_stream *stream)
+{
+ KUNIT_STATIC_STUB_REDIRECT(string_stream_destroy, stream);
+
+ if (IS_ERR_OR_NULL(stream))
+ return;
+
+ string_stream_clear(stream);
+ kfree(stream);
+}
+
+static void resource_free_string_stream(void *p)
+{
+ struct string_stream *stream = p;
+
+ string_stream_destroy(stream);
+}
+
+struct string_stream *kunit_alloc_string_stream(struct kunit *test, gfp_t gfp)
+{
+ struct string_stream *stream;
+
+ stream = alloc_string_stream(gfp);
+ if (IS_ERR(stream))
+ return stream;
+
+ if (kunit_add_action_or_reset(test, resource_free_string_stream, stream) != 0)
+ return ERR_PTR(-ENOMEM);
+
+ return stream;
+}
+
+void kunit_free_string_stream(struct kunit *test, struct string_stream *stream)
+{
+ kunit_release_action(test, resource_free_string_stream, (void *)stream);
+}
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
new file mode 100644
index 000000000000..7be2450c7079
--- /dev/null
+++ b/lib/kunit/string-stream.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_STRING_STREAM_H
+#define _KUNIT_STRING_STREAM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/stdarg.h>
+
+struct string_stream_fragment {
+ struct list_head node;
+ char *fragment;
+};
+
+struct string_stream {
+ size_t length;
+ struct list_head fragments;
+ /* length and fragments are protected by this lock */
+ spinlock_t lock;
+ gfp_t gfp;
+ bool append_newlines;
+};
+
+struct kunit;
+
+struct string_stream *kunit_alloc_string_stream(struct kunit *test, gfp_t gfp);
+void kunit_free_string_stream(struct kunit *test, struct string_stream *stream);
+
+struct string_stream *alloc_string_stream(gfp_t gfp);
+void free_string_stream(struct string_stream *stream);
+
+int __printf(2, 3) string_stream_add(struct string_stream *stream,
+ const char *fmt, ...);
+
+int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
+
+void string_stream_clear(struct string_stream *stream);
+
+char *string_stream_get_string(struct string_stream *stream);
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other);
+
+bool string_stream_is_empty(struct string_stream *stream);
+
+void string_stream_destroy(struct string_stream *stream);
+
+static inline void string_stream_set_append_newlines(struct string_stream *stream,
+ bool append_newlines)
+{
+ stream->append_newlines = append_newlines;
+}
+
+#endif /* _KUNIT_STRING_STREAM_H */
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
new file mode 100644
index 000000000000..1d1475578515
--- /dev/null
+++ b/lib/kunit/test.c
@@ -0,0 +1,938 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <kunit/attributes.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/panic.h>
+#include <linux/sched/debug.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include "debugfs.h"
+#include "device-impl.h"
+#include "hooks-impl.h"
+#include "string-stream.h"
+#include "try-catch-impl.h"
+
+static DEFINE_MUTEX(kunit_run_lock);
+
+/*
+ * Hook to fail the current test and print an error message to the log.
+ */
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char *buffer;
+
+ if (!current->kunit_test)
+ return;
+
+ kunit_set_failure(current->kunit_test);
+
+ /* kunit_err() only accepts literals, so evaluate the args first. */
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ buffer = kunit_kmalloc(current->kunit_test, len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(buffer, len, fmt, args);
+ va_end(args);
+
+ kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer);
+ kunit_kfree(current->kunit_test, buffer);
+}
+
+/*
+ * Enable KUnit tests to run.
+ */
+#ifdef CONFIG_KUNIT_DEFAULT_ENABLED
+static bool enable_param = true;
+#else
+static bool enable_param;
+#endif
+module_param_named(enable, enable_param, bool, 0);
+MODULE_PARM_DESC(enable, "Enable KUnit tests");
+
+/*
+ * KUnit statistic mode:
+ * 0 - disabled
+ * 1 - only when there is more than one subtest
+ * 2 - enabled
+ */
+static int kunit_stats_enabled = 1;
+module_param_named(stats_enabled, kunit_stats_enabled, int, 0644);
+MODULE_PARM_DESC(stats_enabled,
+ "Print test stats: never (0), only for multiple subtests (1), or always (2)");
+
+struct kunit_result_stats {
+ unsigned long passed;
+ unsigned long skipped;
+ unsigned long failed;
+ unsigned long total;
+};
+
+static bool kunit_should_print_stats(struct kunit_result_stats stats)
+{
+ if (kunit_stats_enabled == 0)
+ return false;
+
+ if (kunit_stats_enabled == 2)
+ return true;
+
+ return (stats.total > 1);
+}
+
+static void kunit_print_test_stats(struct kunit *test,
+ struct kunit_result_stats stats)
+{
+ if (!kunit_should_print_stats(stats))
+ return;
+
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ test->name,
+ stats.passed,
+ stats.failed,
+ stats.skipped,
+ stats.total);
+}
+
+/* Append formatted message to log. */
+void kunit_log_append(struct string_stream *log, const char *fmt, ...)
+{
+ va_list args;
+
+ if (!log)
+ return;
+
+ va_start(args, fmt);
+ string_stream_vadd(log, fmt, args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(kunit_log_append);
+
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+ size_t len = 0;
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ len++;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+
+/* Currently supported test levels */
+enum {
+ KUNIT_LEVEL_SUITE = 0,
+ KUNIT_LEVEL_CASE,
+ KUNIT_LEVEL_CASE_PARAM,
+};
+
+static void kunit_print_suite_start(struct kunit_suite *suite)
+{
+ /*
+ * We do not log the test suite header as doing so would
+ * mean debugfs display would consist of the test suite
+ * header prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite header for the debugfs
+ * representation.
+ */
+ pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
+ suite->name);
+ kunit_print_attr((void *)suite, false, KUNIT_LEVEL_CASE);
+ pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
+ kunit_suite_num_test_cases(suite));
+}
+
+static void kunit_print_ok_not_ok(struct kunit *test,
+ unsigned int test_level,
+ enum kunit_status status,
+ size_t test_number,
+ const char *description,
+ const char *directive)
+{
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
+ const char *directive_body = (status == KUNIT_SKIPPED) ? directive : "";
+
+ /*
+ * When test is NULL, assume the results are from the suite;
+ * suite results are currently expected at level 0 only.
+ */
+ WARN(!test && test_level, "suite test level can't be %u!\n", test_level);
+
+ /*
+ * We do not log the test suite results as doing so would
+ * mean debugfs display would consist of an incorrect test
+ * number. Hence directly printk the suite result, and we will
+ * separately seq_printf() the suite results for the debugfs
+ * representation.
+ */
+ if (!test)
+ pr_info("%s %zd %s%s%s\n",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ directive_body);
+ else
+ kunit_log(KERN_INFO, test,
+ "%*s%s %zd %s%s%s",
+ KUNIT_INDENT_LEN * test_level, "",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ directive_body);
+}
+
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
+{
+ const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;
+
+ if (suite->suite_init_err)
+ return KUNIT_FAILURE;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
+
+static size_t kunit_suite_counter = 1;
+
+static void kunit_print_suite_end(struct kunit_suite *suite)
+{
+ kunit_print_ok_not_ok(NULL, KUNIT_LEVEL_SUITE,
+ kunit_suite_has_succeeded(suite),
+ kunit_suite_counter++,
+ suite->name,
+ suite->status_comment);
+}
+
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ struct kunit_case *tc;
+ unsigned int i = 1;
+
+ kunit_suite_for_each_test_case(suite, tc) {
+ if (tc == test_case)
+ return i;
+ i++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_test_case_num);
+
+static void kunit_print_string_stream(struct kunit *test,
+ struct string_stream *stream)
+{
+ struct string_stream_fragment *fragment;
+ char *buf;
+
+ if (string_stream_is_empty(stream))
+ return;
+
+ buf = string_stream_get_string(stream);
+ if (!buf) {
+ kunit_err(test,
+ "Could not allocate buffer, dumping stream:\n");
+ list_for_each_entry(fragment, &stream->fragments, node) {
+ kunit_err(test, "%s", fragment->fragment);
+ }
+ kunit_err(test, "\n");
+ } else {
+ kunit_err(test, "%s", buf);
+ kfree(buf);
+ }
+}
+
+static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
+ enum kunit_assert_type type, const struct kunit_assert *assert,
+ assert_format_t assert_format, const struct va_format *message)
+{
+ struct string_stream *stream;
+
+ kunit_set_failure(test);
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ if (IS_ERR(stream)) {
+ WARN(true,
+ "Could not allocate stream to print failed assertion in %s:%d\n",
+ loc->file,
+ loc->line);
+ return;
+ }
+
+ kunit_assert_prologue(loc, type, stream);
+ assert_format(assert, message, stream);
+
+ kunit_print_string_stream(test, stream);
+
+ kunit_free_string_stream(test, stream);
+}
+
+void __noreturn __kunit_abort(struct kunit *test)
+{
+ kunit_try_catch_throw(&test->try_catch); /* Does not return. */
+
+ /*
+ * Throw could not abort from test.
+ *
+ * XXX: we should never reach this line! As kunit_try_catch_throw is
+ * marked __noreturn.
+ */
+ WARN_ONCE(true, "Throw could not abort from test!\n");
+}
+EXPORT_SYMBOL_GPL(__kunit_abort);
+
+void __kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ const struct kunit_assert *assert,
+ assert_format_t assert_format,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct va_format message;
+
+ va_start(args, fmt);
+
+ message.fmt = fmt;
+ message.va = &args;
+
+ kunit_fail(test, loc, type, assert, assert_format, &message);
+
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion);
+
+void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log)
+{
+ spin_lock_init(&test->lock);
+ INIT_LIST_HEAD(&test->resources);
+ test->name = name;
+ test->log = log;
+ if (test->log)
+ string_stream_clear(log);
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
+}
+EXPORT_SYMBOL_GPL(kunit_init_test);
+
+/* Only warn when a test takes more than twice the threshold */
+#define KUNIT_SPEED_WARNING_MULTIPLIER 2
+
+/* Slow tests are defined as taking more than 1s */
+#define KUNIT_SPEED_SLOW_THRESHOLD_S 1
+
+#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S \
+ (KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S)
+
+#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC)
+
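+/*
+ * Example (illustrative): with the defaults above, a test case that is not
+ * marked slow or very_slow but runs for 2.5s exceeds the 2 * 1s warning
+ * threshold, so kunit_run_case_check_speed() below will suggest marking it
+ * slow.
+ */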
+static void kunit_run_case_check_speed(struct kunit *test,
+ struct kunit_case *test_case,
+ struct timespec64 duration)
+{
+ struct timespec64 slow_thr =
+ s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S);
+ enum kunit_speed speed = test_case->attr.speed;
+
+ if (timespec64_compare(&duration, &slow_thr) < 0)
+ return;
+
+ if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW)
+ return;
+
+ kunit_warn(test,
+ "Test should be marked slow (runtime: %lld.%09lds)",
+ duration.tv_sec, duration.tv_nsec);
+}
+
+/*
+ * Initializes and runs test case. Does not clean up or do post validations.
+ */
+static void kunit_run_case_internal(struct kunit *test,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ struct timespec64 start, end;
+
+ if (suite->init) {
+ int ret;
+
+ ret = suite->init(test);
+ if (ret) {
+ kunit_err(test, "failed to initialize: %d\n", ret);
+ kunit_set_failure(test);
+ return;
+ }
+ }
+
+ ktime_get_ts64(&start);
+
+ test_case->run_case(test);
+
+ ktime_get_ts64(&end);
+
+ kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start));
+}
+
+static void kunit_case_internal_cleanup(struct kunit *test)
+{
+ kunit_cleanup(test);
+}
+
+/*
+ * Performs post-validations and cleanup after a test case has been run.
+ * XXX: Should ONLY BE CALLED AFTER kunit_run_case_internal!
+ */
+static void kunit_run_case_cleanup(struct kunit *test,
+ struct kunit_suite *suite)
+{
+ if (suite->exit)
+ suite->exit(test);
+
+ kunit_case_internal_cleanup(test);
+}
+
+struct kunit_try_catch_context {
+ struct kunit *test;
+ struct kunit_suite *suite;
+ struct kunit_case *test_case;
+};
+
+static void kunit_try_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+ struct kunit_case *test_case = ctx->test_case;
+
+ current->kunit_test = test;
+
+ /*
+ * kunit_run_case_internal may encounter a fatal error; if it does,
+ * abort will be called, this thread will exit, and finally the parent
+ * thread will resume control and handle any necessary cleanup.
+ */
+ kunit_run_case_internal(test, suite, test_case);
+}
+
+static void kunit_try_run_case_cleanup(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+
+ current->kunit_test = test;
+
+ kunit_run_case_cleanup(test, suite);
+}
+
+static void kunit_catch_run_case_cleanup(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
+
+ /* It is always a failure if cleanup aborts. */
+ kunit_set_failure(test);
+
+ if (try_exit_code) {
+ /*
+ * Test case could not finish; we have no idea what state it is
+ * in, so don't clean up.
+ */
+ if (try_exit_code == -ETIMEDOUT) {
+ kunit_err(test, "test case cleanup timed out\n");
+ /*
+ * An unknown internal error occurred during test case cleanup,
+ * so there is nothing more to clean up.
+ */
+ } else {
+ kunit_err(test, "internal error occurred during test case cleanup: %d\n",
+ try_exit_code);
+ }
+ return;
+ }
+
+ kunit_err(test, "test aborted during cleanup. continuing without cleaning up\n");
+}
+
+static void kunit_catch_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
+
+ if (try_exit_code) {
+ kunit_set_failure(test);
+ /*
+ * Test case could not finish; we have no idea what state it is
+ * in, so don't clean up.
+ */
+ if (try_exit_code == -ETIMEDOUT) {
+ kunit_err(test, "test case timed out\n");
+ /*
+ * Unknown internal error occurred preventing test case from
+ * running, so there is nothing to clean up.
+ */
+ } else {
+ kunit_err(test, "internal error occurred preventing test case from running: %d\n",
+ try_exit_code);
+ }
+ return;
+ }
+}
+
+/*
+ * Performs all logic to run a test case. It also catches most errors that
+ * occur in a test case and reports them as failures.
+ */
+static void kunit_run_case_catch_errors(struct kunit_suite *suite,
+ struct kunit_case *test_case,
+ struct kunit *test)
+{
+ struct kunit_try_catch_context context;
+ struct kunit_try_catch *try_catch;
+
+ try_catch = &test->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_try_run_case,
+ kunit_catch_run_case);
+ context.test = test;
+ context.suite = suite;
+ context.test_case = test_case;
+ kunit_try_catch_run(try_catch, &context);
+
+ /* Now run the cleanup */
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_try_run_case_cleanup,
+ kunit_catch_run_case_cleanup);
+ kunit_try_catch_run(try_catch, &context);
+
+ /* Propagate the parameter result to the test case. */
+ if (test->status == KUNIT_FAILURE)
+ test_case->status = KUNIT_FAILURE;
+ else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
+ test_case->status = KUNIT_SUCCESS;
+}
+
+static void kunit_print_suite_stats(struct kunit_suite *suite,
+ struct kunit_result_stats suite_stats,
+ struct kunit_result_stats param_stats)
+{
+ if (kunit_should_print_stats(suite_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ suite->name,
+ suite_stats.passed,
+ suite_stats.failed,
+ suite_stats.skipped,
+ suite_stats.total);
+ }
+
+ if (kunit_should_print_stats(param_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# Totals: pass:%lu fail:%lu skip:%lu total:%lu",
+ param_stats.passed,
+ param_stats.failed,
+ param_stats.skipped,
+ param_stats.total);
+ }
+}
+
+static void kunit_update_stats(struct kunit_result_stats *stats,
+ enum kunit_status status)
+{
+ switch (status) {
+ case KUNIT_SUCCESS:
+ stats->passed++;
+ break;
+ case KUNIT_SKIPPED:
+ stats->skipped++;
+ break;
+ case KUNIT_FAILURE:
+ stats->failed++;
+ break;
+ }
+
+ stats->total++;
+}
+
+static void kunit_accumulate_stats(struct kunit_result_stats *total,
+ struct kunit_result_stats add)
+{
+ total->passed += add.passed;
+ total->skipped += add.skipped;
+ total->failed += add.failed;
+ total->total += add.total;
+}
+
+int kunit_run_tests(struct kunit_suite *suite)
+{
+ char param_desc[KUNIT_PARAM_DESC_SIZE];
+ struct kunit_case *test_case;
+ struct kunit_result_stats suite_stats = { 0 };
+ struct kunit_result_stats total_stats = { 0 };
+
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ if (suite->suite_init) {
+ suite->suite_init_err = suite->suite_init(suite);
+ if (suite->suite_init_err) {
+ kunit_err(suite, KUNIT_SUBTEST_INDENT
+ "# failed to initialize (%d)", suite->suite_init_err);
+ goto suite_end;
+ }
+ }
+
+ kunit_print_suite_start(suite);
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ struct kunit test = { .param_value = NULL, .param_index = 0 };
+ struct kunit_result_stats param_stats = { 0 };
+
+ kunit_init_test(&test, test_case->name, test_case->log);
+ if (test_case->status == KUNIT_SKIPPED) {
+ /* Test marked as skip */
+ test.status = KUNIT_SKIPPED;
+ kunit_update_stats(&param_stats, test.status);
+ } else if (!test_case->generate_params) {
+ /* Non-parameterised test. */
+ test_case->status = KUNIT_SKIPPED;
+ kunit_run_case_catch_errors(suite, test_case, &test);
+ kunit_update_stats(&param_stats, test.status);
+ } else {
+ /* Get initial param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(NULL, param_desc);
+ test_case->status = KUNIT_SKIPPED;
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "KTAP version 1\n");
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "# Subtest: %s", test_case->name);
+
+ while (test.param_value) {
+ kunit_run_case_catch_errors(suite, test_case, &test);
+
+ if (param_desc[0] == '\0') {
+ snprintf(param_desc, sizeof(param_desc),
+ "param-%d", test.param_index);
+ }
+
+ kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE_PARAM,
+ test.status,
+ test.param_index + 1,
+ param_desc,
+ test.status_comment);
+
+ kunit_update_stats(&param_stats, test.status);
+
+ /* Get next param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(test.param_value, param_desc);
+ test.param_index++;
+ test.status = KUNIT_SUCCESS;
+ test.status_comment[0] = '\0';
+ test.priv = NULL;
+ }
+ }
+
+ kunit_print_attr((void *)test_case, true, KUNIT_LEVEL_CASE);
+
+ kunit_print_test_stats(&test, param_stats);
+
+ kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE, test_case->status,
+ kunit_test_case_num(suite, test_case),
+ test_case->name,
+ test.status_comment);
+
+ kunit_update_stats(&suite_stats, test_case->status);
+ kunit_accumulate_stats(&total_stats, param_stats);
+ }
+
+ if (suite->suite_exit)
+ suite->suite_exit(suite);
+
+ kunit_print_suite_stats(suite, suite_stats, total_stats);
+suite_end:
+ kunit_print_suite_end(suite);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_run_tests);
+
+static void kunit_init_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
+ suite->suite_init_err = 0;
+
+ if (suite->log)
+ string_stream_clear(suite->log);
+}
+
+bool kunit_enabled(void)
+{
+ return enable_param;
+}
+
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
+{
+ unsigned int i;
+
+ if (!kunit_enabled() && num_suites > 0) {
+ pr_info("kunit: disabled\n");
+ return 0;
+ }
+
+ kunit_suite_counter = 1;
+
+ /* Use mutex lock to guard against running tests concurrently. */
+ if (mutex_lock_interruptible(&kunit_run_lock)) {
+ pr_err("kunit: test interrupted\n");
+ return -EINTR;
+ }
+ static_branch_inc(&kunit_running);
+
+ for (i = 0; i < num_suites; i++) {
+ kunit_init_suite(suites[i]);
+ kunit_run_tests(suites[i]);
+ }
+
+ static_branch_dec(&kunit_running);
+ mutex_unlock(&kunit_run_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
+
+static void kunit_exit_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_destroy_suite(suite);
+}
+
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
+{
+ unsigned int i;
+
+ if (!kunit_enabled())
+ return;
+
+ for (i = 0; i < num_suites; i++)
+ kunit_exit_suite(suites[i]);
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
+#ifdef CONFIG_MODULES
+static void kunit_module_init(struct module *mod)
+{
+ struct kunit_suite_set suite_set, filtered_set;
+ struct kunit_suite_set normal_suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ struct kunit_suite_set init_suite_set = {
+ mod->kunit_init_suites, mod->kunit_init_suites + mod->num_kunit_init_suites,
+ };
+ const char *action = kunit_action();
+ int err = 0;
+
+ if (mod->num_kunit_init_suites > 0)
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ else
+ suite_set = normal_suite_set;
+
+ filtered_set = kunit_filter_suites(&suite_set,
+ kunit_filter_glob() ?: "*.*",
+ kunit_filter(), kunit_filter_action(),
+ &err);
+ if (err)
+ pr_err("kunit module: error filtering suites: %d\n", err);
+
+ mod->kunit_suites = (struct kunit_suite **)filtered_set.start;
+ mod->num_kunit_suites = filtered_set.end - filtered_set.start;
+
+ if (mod->num_kunit_init_suites > 0)
+ kfree(suite_set.start);
+
+ if (!action)
+ kunit_exec_run_tests(&filtered_set, false);
+ else if (!strcmp(action, "list"))
+ kunit_exec_list_tests(&filtered_set, false);
+ else if (!strcmp(action, "list_attr"))
+ kunit_exec_list_tests(&filtered_set, true);
+ else
+ pr_err("kunit: unknown action '%s'\n", action);
+}
+
+static void kunit_module_exit(struct module *mod)
+{
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+
+ /*
+ * Check if the start address is a valid virtual address to detect
+ * if the module load sequence has failed and the suite set has not
+ * been initialized and filtered.
+ */
+ if (!suite_set.start || !virt_addr_valid(suite_set.start))
+ return;
+
+ if (!action)
+ __kunit_test_suites_exit(mod->kunit_suites,
+ mod->num_kunit_suites);
+
+ kunit_free_suite_set(suite_set);
+}
+
+static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+
+ switch (val) {
+ case MODULE_STATE_LIVE:
+ kunit_module_init(mod);
+ break;
+ case MODULE_STATE_GOING:
+ kunit_module_exit(mod);
+ break;
+ case MODULE_STATE_COMING:
+ break;
+ case MODULE_STATE_UNFORMED:
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block kunit_mod_nb = {
+ .notifier_call = kunit_module_notify,
+ .priority = 0,
+};
+#endif
+
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_action_wrapper, kfree, const void *)
+
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
+{
+ void *data;
+
+ data = kmalloc_array(n, size, gfp);
+
+ if (!data)
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, kfree_action_wrapper, data) != 0)
+ return NULL;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
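+
+/*
+ * Minimal usage sketch (illustrative only; the test name below is
+ * hypothetical): memory obtained from kunit_kmalloc_array() is released by
+ * the deferred kfree action registered above, so a test only needs
+ * kunit_kfree() if it wants to free early.
+ *
+ *	static void example_alloc_test(struct kunit *test)
+ *	{
+ *		u32 *vals = kunit_kmalloc_array(test, 8, sizeof(*vals),
+ *						GFP_KERNEL);
+ *
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vals);
+ *		vals[0] = 42;
+ *		KUNIT_EXPECT_EQ(test, vals[0], 42U);
+ *	}
+ */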
+
+void kunit_kfree(struct kunit *test, const void *ptr)
+{
+ if (!ptr)
+ return;
+
+ kunit_release_action(test, kfree_action_wrapper, (void *)ptr);
+}
+EXPORT_SYMBOL_GPL(kunit_kfree);
+
+void kunit_cleanup(struct kunit *test)
+{
+ struct kunit_resource *res;
+ unsigned long flags;
+
+ /*
+ * test->resources is a stack - each allocation must be freed in the
+ * reverse order from which it was added since one resource may depend
+ * on another for its entire lifetime.
+ * Also, we cannot use the normal list_for_each constructs, even the
+ * safe ones because *arbitrary* nodes may be deleted when
+ * kunit_resource_free is called; the list_for_each_safe variants only
+ * protect against the current node being deleted, not the next.
+ */
+ while (true) {
+ spin_lock_irqsave(&test->lock, flags);
+ if (list_empty(&test->resources)) {
+ spin_unlock_irqrestore(&test->lock, flags);
+ break;
+ }
+ res = list_last_entry(&test->resources,
+ struct kunit_resource,
+ node);
+ /*
+ * Need to unlock here as a resource may remove another
+ * resource, and this can't happen if the test->lock
+ * is held.
+ */
+ spin_unlock_irqrestore(&test->lock, flags);
+ kunit_remove_resource(test, res);
+ }
+ current->kunit_test = NULL;
+}
+EXPORT_SYMBOL_GPL(kunit_cleanup);
+
+static int __init kunit_init(void)
+{
+ /* Install the KUnit hook functions. */
+ kunit_install_hooks();
+
+ kunit_debugfs_init();
+
+ kunit_bus_init();
+#ifdef CONFIG_MODULES
+ return register_module_notifier(&kunit_mod_nb);
+#else
+ return 0;
+#endif
+}
+late_initcall(kunit_init);
+
+static void __exit kunit_exit(void)
+{
+ memset(&kunit_hooks, 0, sizeof(kunit_hooks));
+#ifdef CONFIG_MODULES
+ unregister_module_notifier(&kunit_mod_nb);
+#endif
+
+ kunit_bus_shutdown();
+
+ kunit_debugfs_cleanup();
+}
+module_exit(kunit_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/try-catch-impl.h b/lib/kunit/try-catch-impl.h
new file mode 100644
index 000000000000..203ba6a5e740
--- /dev/null
+++ b/lib/kunit/try-catch-impl.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal KUnit try-catch implementation to be shared with tests.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TRY_CATCH_IMPL_H
+#define _KUNIT_TRY_CATCH_IMPL_H
+
+#include <kunit/try-catch.h>
+#include <linux/types.h>
+
+struct kunit;
+
+static inline void kunit_try_catch_init(struct kunit_try_catch *try_catch,
+ struct kunit *test,
+ kunit_try_catch_func_t try,
+ kunit_try_catch_func_t catch)
+{
+ try_catch->test = test;
+ try_catch->try = try;
+ try_catch->catch = catch;
+}
+
+#endif /* _KUNIT_TRY_CATCH_IMPL_H */
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
new file mode 100644
index 000000000000..f7825991d576
--- /dev/null
+++ b/lib/kunit/try-catch.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An API that allows a function which may fail to be executed, and to
+ * recover from any failure in a controlled manner.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "try-catch-impl.h"
+
+void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
+{
+ try_catch->try_result = -EFAULT;
+ kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
+}
+EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
+
+static int kunit_generic_run_threadfn_adapter(void *data)
+{
+ struct kunit_try_catch *try_catch = data;
+
+ try_catch->try(try_catch->context);
+
+ kthread_complete_and_exit(try_catch->try_completion, 0);
+}
+
+static unsigned long kunit_test_timeout(void)
+{
+ /*
+ * TODO(brendanhiggins@google.com): We should probably have some type of
+ * variable timeout here. The only question is what that timeout value
+ * should be.
+ *
+ * The intention has always been, at some point, to be able to label
+ * tests with some type of size bucket (unit/small, integration/medium,
+ * large/system/end-to-end, etc), where each size bucket would get a
+ * default timeout value kind of like what Bazel does:
+ * https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
+ * There is still some debate to be had on exactly how we do this. (For
+ * one, we probably want to have some sort of test runner level
+ * timeout.)
+ *
+ * For more background on this topic, see:
+ * https://mike-bland.com/2011/11/01/small-medium-large.html
+ *
+ * If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
+ * the task will be killed and an oops generated.
+ */
+ return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
+}
+
+void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+{
+ DECLARE_COMPLETION_ONSTACK(try_completion);
+ struct kunit *test = try_catch->test;
+ struct task_struct *task_struct;
+ int exit_code, time_remaining;
+
+ try_catch->context = context;
+ try_catch->try_completion = &try_completion;
+ try_catch->try_result = 0;
+ task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+ try_catch,
+ "kunit_try_catch_thread");
+ if (IS_ERR(task_struct)) {
+ try_catch->catch(try_catch->context);
+ return;
+ }
+
+ time_remaining = wait_for_completion_timeout(&try_completion,
+ kunit_test_timeout());
+ if (time_remaining == 0) {
+ kunit_err(test, "try timed out\n");
+ try_catch->try_result = -ETIMEDOUT;
+ kthread_stop(task_struct);
+ }
+
+ exit_code = try_catch->try_result;
+
+ if (!exit_code)
+ return;
+
+ if (exit_code == -EFAULT)
+ try_catch->try_result = 0;
+ else if (exit_code == -EINTR)
+ kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code)
+ kunit_err(test, "Unknown error: %d\n", exit_code);
+
+ try_catch->catch(try_catch->context);
+}
+EXPORT_SYMBOL_GPL(kunit_try_catch_run);
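+
+/*
+ * Minimal usage sketch (illustrative only; the callback names below are
+ * hypothetical): a caller pairs kunit_try_catch_init() from try-catch-impl.h
+ * with kunit_try_catch_run() above. The catch callback runs only if the try
+ * callback throws, exits abnormally or times out.
+ *
+ *	static void example_try(void *context)
+ *	{
+ *		struct kunit *test = context;
+ *
+ *		kunit_try_catch_throw(&test->try_catch);
+ *	}
+ *
+ *	static void example_catch(void *context)
+ *	{
+ *		struct kunit *test = context;
+ *
+ *		kunit_err(test, "try callback bailed out\n");
+ *	}
+ *
+ *	kunit_try_catch_init(&test->try_catch, test, example_try, example_catch);
+ *	kunit_try_catch_run(&test->try_catch, test);
+ */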
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
new file mode 100644
index 000000000000..859b67c4d697
--- /dev/null
+++ b/lib/kunit_iov_iter.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* I/O iterator tests. This can only test kernel-backed iterator types.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <kunit/test.h>
+
+MODULE_DESCRIPTION("iov_iter testing");
+MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_LICENSE("GPL");
+
+struct kvec_test_range {
+ int from, to;
+};
+
+static const struct kvec_test_range kvec_test_ranges[] = {
+ { 0x00002, 0x00002 },
+ { 0x00027, 0x03000 },
+ { 0x05193, 0x18794 },
+ { 0x20000, 0x20000 },
+ { 0x20000, 0x24000 },
+ { 0x24000, 0x27001 },
+ { 0x29000, 0xffffb },
+ { 0xffffd, 0xffffe },
+ { -1 }
+};
+
+static inline u8 pattern(unsigned long x)
+{
+ return x & 0xff;
+}
+
+static void iov_kunit_unmap(void *data)
+{
+ vunmap(data);
+}
+
+static void *__init iov_kunit_create_buffer(struct kunit *test,
+ struct page ***ppages,
+ size_t npages)
+{
+ struct page **pages;
+ unsigned long got;
+ void *buffer;
+
+ pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
+ *ppages = pages;
+
+ got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ if (got != npages) {
+ release_pages(pages, got);
+ KUNIT_ASSERT_EQ(test, got, npages);
+ }
+
+ buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+
+ kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
+ return buffer;
+}
+
+static void __init iov_kunit_load_kvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct kvec *kvec, unsigned int kvmax,
+ void *buffer, size_t bufsize,
+ const struct kvec_test_range *pr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < kvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+ kvec[i].iov_base = buffer + pr->from;
+ kvec[i].iov_len = pr->to - pr->from;
+ size += pr->to - pr->from;
+ }
+ KUNIT_ASSERT_LE(test, size, bufsize);
+
+ iov_iter_kvec(iter, dir, kvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = min(iter.count, bufsize);
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+struct bvec_test_range {
+ int page, from, to;
+};
+
+static const struct bvec_test_range bvec_test_ranges[] = {
+ { 0, 0x0002, 0x0002 },
+ { 1, 0x0027, 0x0893 },
+ { 2, 0x0193, 0x0794 },
+ { 3, 0x0000, 0x1000 },
+ { 4, 0x0000, 0x1000 },
+ { 5, 0x0000, 0x1000 },
+ { 6, 0x0000, 0x0ffb },
+ { 6, 0x0ffd, 0x0ffe },
+ { -1, -1, -1 }
+};
+
+static void __init iov_kunit_load_bvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct bio_vec *bvec, unsigned int bvmax,
+ struct page **pages, size_t npages,
+ size_t bufsize,
+ const struct bvec_test_range *pr)
+{
+ struct page *can_merge = NULL, *page;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < bvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_LT(test, pr->page, npages);
+ KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
+ KUNIT_ASSERT_GE(test, pr->from, 0);
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
+
+ page = pages[pr->page];
+ if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
+ i--;
+ bvec[i].bv_len += pr->to;
+ } else {
+ bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
+ }
+
+ size += pr->to - pr->from;
+ if ((pr->to & ~PAGE_MASK) == 0)
+ can_merge = page + pr->to / PAGE_SIZE;
+ else
+ can_merge = NULL;
+ }
+
+ iov_iter_bvec(iter, dir, bvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, b, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ b = 0;
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
+ u8 *p = scratch + pr->page * PAGE_SIZE;
+
+ for (i = pr->from; i < pr->to; i++)
+ p[i] = pattern(patt++);
+ }
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
+ size_t patt = pr->page * PAGE_SIZE;
+
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(patt + j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+static void iov_kunit_destroy_xarray(void *data)
+{
+ struct xarray *xarray = data;
+
+ xa_destroy(xarray);
+ kfree(xarray);
+}
+
+static void __init iov_kunit_load_xarray(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct xarray *xarray,
+ struct page **pages, size_t npages)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
+
+ KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+ size += PAGE_SIZE;
+ }
+ iov_iter_xarray(iter, dir, xarray, 0, size);
+}
+
+static struct xarray *iov_kunit_create_xarray(struct kunit *test)
+{
+ struct xarray *xarray;
+
+ xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
+ xa_init(xarray);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
+ return xarray;
+}
+
+/*
+ * Test copying to an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_to_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, READ, xarray, pr->from, size);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_from_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_KVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct kvec kvec[8];
+ u8 *buffer;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ pr = kvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_BVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct bio_vec bvec[8];
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ pr = bvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = pr->page + from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_XARRAY-type iterators.
+ */
+static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, from, size);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
+ }
+
+stop:
+ KUNIT_SUCCEED();
+}
+
+static struct kunit_case __refdata iov_kunit_cases[] = {
+ KUNIT_CASE(iov_kunit_copy_to_kvec),
+ KUNIT_CASE(iov_kunit_copy_from_kvec),
+ KUNIT_CASE(iov_kunit_copy_to_bvec),
+ KUNIT_CASE(iov_kunit_copy_from_bvec),
+ KUNIT_CASE(iov_kunit_copy_to_xarray),
+ KUNIT_CASE(iov_kunit_copy_from_xarray),
+ KUNIT_CASE(iov_kunit_extract_pages_kvec),
+ KUNIT_CASE(iov_kunit_extract_pages_bvec),
+ KUNIT_CASE(iov_kunit_extract_pages_xarray),
+ {}
+};
+
+static struct kunit_suite iov_kunit_suite = {
+ .name = "iov_iter",
+ .test_cases = iov_kunit_cases,
+};
+
+kunit_test_suites(&iov_kunit_suite);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 77ab839644c5..649e687413a0 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -12,7 +12,7 @@
* pages = {},
* month = {June},
*}
- * Used by the iSCSI driver, possibly others, and derived from the
+ * Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
@@ -65,12 +65,6 @@ static void __exit libcrc32c_mod_fini(void)
crypto_free_shash(tfm);
}
-const char *crc32c_impl(void)
-{
- return crypto_shash_driver_name(tfm);
-}
-EXPORT_SYMBOL(crc32c_impl);
-
module_init(libcrc32c_mod_init);
module_exit(libcrc32c_mod_fini);
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
new file mode 100644
index 000000000000..a1a7dfa881de
--- /dev/null
+++ b/lib/linear_ranges.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers to map values in a linear range to a range index
+ *
+ * Original idea borrowed from regulator framework
+ *
+ * It might be useful to also support inversely proportional ranges.
+ * Copyright 2020 ROHM Semiconductors
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+
+/**
+ * linear_range_values_in_range - return the number of values in a range
+ * @r: pointer to the linear range in which values are counted
+ *
+ * Compute the number of values in the range pointed to by @r. Note that the
+ * values can all be equal - a range with selectors 0,...,2 and step 0 still
+ * contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the range pointed to by @r
+ */
+unsigned int linear_range_values_in_range(const struct linear_range *r)
+{
+ if (!r)
+ return 0;
+ return r->max_sel - r->min_sel + 1;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range);
+
+/**
+ * linear_range_values_in_range_array - return the number of values in ranges
+ * @r: pointer to an array of linear ranges in which values are counted
+ * @ranges: number of ranges to include in the computation
+ *
+ * Compute the number of values in the ranges pointed to by @r. Note that the
+ * values can all be equal - a range with selectors 0,...,2 and step 0 still
+ * contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the first @ranges ranges pointed to by @r
+ */
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges)
+{
+ int i, values_in_range = 0;
+
+ for (i = 0; i < ranges; i++) {
+ int values;
+
+ values = linear_range_values_in_range(&r[i]);
+ if (!values)
+ return values;
+
+ values_in_range += values;
+ }
+ return values_in_range;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range_array);
+
+/**
+ * linear_range_get_max_value - return the largest value in a range
+ * @r: pointer to the linear range from which the value is looked up
+ *
+ * Return: the largest value in the given range
+ */
+unsigned int linear_range_get_max_value(const struct linear_range *r)
+{
+ return r->min + (r->max_sel - r->min_sel) * r->step;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_max_value);
+
+/**
+ * linear_range_get_value - fetch a value from a given range
+ * @r: pointer to the linear range from which the value is looked up
+ * @selector: selector for which the value is searched
+ * @val: address where the found value is stored
+ *
+ * Search the given range for the value which matches the given selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in the
+ * range.
+ */
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val)
+{
+ if (r->min_sel > selector || r->max_sel < selector)
+ return -EINVAL;
+
+ *val = r->min + (selector - r->min_sel) * r->step;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value);
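+
+/*
+ * Minimal usage sketch (illustrative only; the range below is hypothetical
+ * and maps selectors 2..10 to the values 500, 600, ..., 1300):
+ *
+ *	static const struct linear_range example_range = {
+ *		.min = 500,
+ *		.min_sel = 2,
+ *		.max_sel = 10,
+ *		.step = 100,
+ *	};
+ *	unsigned int val;
+ *
+ *	if (!linear_range_get_value(&example_range, 5, &val))
+ *		pr_info("selector 5 -> %u\n", val);	(prints 800)
+ */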
+
+/**
+ * linear_range_get_value_array - fetch a value from an array of ranges
+ * @r: pointer to an array of linear ranges from which the value is looked up
+ * @ranges: number of ranges in the array
+ * @selector: selector for which the value is searched
+ * @val: address where the found value is stored
+ *
+ * Search through an array of ranges for the value which matches the given
+ * selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in any of
+ * the ranges.
+ */
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val)
+{
+ int i;
+
+ for (i = 0; i < ranges; i++)
+ if (r[i].min_sel <= selector && r[i].max_sel >= selector)
+ return linear_range_get_value(&r[i], selector, val);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value_array);
+
+/**
+ * linear_range_get_selector_low - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was in the range
+ *
+ * Return the selector whose range value is the closest match at or below the
+ * given input value. If the given value is in the range, @found is set to
+ * true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not contain a
+ * value smaller than or equal to the given value
+ */
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (r->min > val)
+ return -EINVAL;
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
+
+/**
+ * linear_range_get_selector_low_array - return linear range selector for value
+ * @r: pointer to an array of linear ranges from which the selector is looked up
+ * @ranges: number of ranges to scan from the array
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was in a range
+ *
+ * Scan the array of ranges for a selector whose range value matches the given
+ * input value. A value matches if it is equal to or smaller than the given
+ * value. If the given value is found to be in a range, scanning is stopped
+ * and @found is set to true. If a range is found whose values are all smaller
+ * than the given value, the range's largest selector is stored in @selector,
+ * scanning continues, and @found is set to false.
+ *
+ * Return: 0 on success, -EINVAL if the range array is invalid or does not
+ * contain a range with a value smaller than or equal to the given value
+ */
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found)
+{
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ranges; i++) {
+ int tmpret;
+
+ tmpret = linear_range_get_selector_low(&r[i], val, selector,
+ found);
+ if (!tmpret)
+ ret = 0;
+
+ if (*found)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
+
+/**
+ * linear_range_get_selector_high - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was in the range
+ *
+ * Return the selector whose range value is the closest match at or above the
+ * given input value. If the given value is in the range, @found is set to
+ * true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not contain a
+ * value greater than or equal to the given value
+ */
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (linear_range_get_max_value(r) < val)
+ return -EINVAL;
+
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->max_sel;
+ else
+ *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
+
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ *
+ * Return the selector whose range value is the closest match at or below the
+ * given input value. If the given value is higher than the maximum value of
+ * the range, the maximum selector is returned.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector)
+{
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return;
+ }
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return;
+ }
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
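+
+/*
+ * Minimal usage sketch (illustrative only): using the hypothetical
+ * example_range from the linear_range_get_value() sketch above (selectors
+ * 2..10 mapping to 500..1300 in steps of 100), look up selectors around a
+ * requested value of 850:
+ *
+ *	unsigned int sel;
+ *	bool found;
+ *
+ *	linear_range_get_selector_low(&example_range, 850, &sel, &found);
+ *	(sel is 5: value 800 is the closest match not above 850)
+ *
+ *	linear_range_get_selector_high(&example_range, 850, &sel, &found);
+ *	(sel is 6: value 900 is the closest match not below 850)
+ */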
+
+MODULE_DESCRIPTION("linear-ranges helper");
+MODULE_LICENSE("GPL");
diff --git a/lib/list-test.c b/lib/list-test.c
new file mode 100644
index 000000000000..0cc27de9cec8
--- /dev/null
+++ b/lib/list-test.c
@@ -0,0 +1,1502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the kernel's linked-list structures.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/klist.h>
+
+struct list_test_struct {
+ int data;
+ struct list_head list;
+};
+
+static void list_test_list_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct list_head list1 = LIST_HEAD_INIT(list1);
+ struct list_head list2;
+ LIST_HEAD(list3);
+ struct list_head *list4;
+ struct list_head *list5;
+
+ INIT_LIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_LIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_LIST_HEAD(list5);
+
+ /* list_empty_careful() checks both next and prev. */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list3));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list4));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void list_test_list_add(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add(&a, &list);
+ list_add(&b, &list);
+
+ /* should be [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_add_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* should be [list] -> a -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a);
+ KUNIT_EXPECT_PTR_EQ(test, a.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &b);
+}
+
+static void list_test_list_del(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+}
+
+static void list_test_list_replace(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+}
+
+static void list_test_list_replace_init(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace_init(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+
+ /* check a_old is empty (initialized) */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
+}
+
+static void list_test_list_swap(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_swap(&a, &b);
+
+ /* after: [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, &b, list.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, list.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+ KUNIT_EXPECT_PTR_EQ(test, &list, b.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &list, a.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.prev);
+}
+
+static void list_test_list_del_init(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_del_init_careful(struct kunit *test)
+{
+ /* NOTE: This test only checks the behaviour of this function in
+ * isolation. It does not verify memory model guarantees.
+ */
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init_careful(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_move(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move(&a, &list2);
+ /* after: [list1] empty, [list2] -> a -> b */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.next);
+}
+
+static void list_test_list_move_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move_tail(&a, &list2);
+ /* after: [list1] empty, [list2] -> b -> a */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &b, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+}
+
+static void list_test_list_bulk_move_tail(struct kunit *test)
+{
+ struct list_head a, b, c, d, x, y;
+ struct list_head *list1_values[] = { &x, &b, &c, &y };
+ struct list_head *list2_values[] = { &a, &d };
+ struct list_head *ptr;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&x, &list1);
+ list_add_tail(&y, &list1);
+
+ list_add_tail(&a, &list2);
+ list_add_tail(&b, &list2);
+ list_add_tail(&c, &list2);
+ list_add_tail(&d, &list2);
+
+ /* before: [list1] -> x -> y, [list2] -> a -> b -> c -> d */
+ list_bulk_move_tail(&y, &b, &c);
+ /* after: [list1] -> x -> b -> c -> y, [list2] -> a -> d */
+
+ list_for_each(ptr, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list1_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+ i = 0;
+ list_for_each(ptr, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list2_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 2);
+}
+
+static void list_test_list_is_head(struct kunit *test)
+{
+ struct list_head a, b, c;
+
+ /* Two lists: [a] -> b, [c] */
+ INIT_LIST_HEAD(&a);
+ INIT_LIST_HEAD(&c);
+ list_add_tail(&b, &a);
+
+ KUNIT_EXPECT_TRUE_MSG(test, list_is_head(&a, &a),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &b),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &c),
+ "Head element of different list");
+}
+
+
+static void list_test_list_is_first(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_TRUE(test, list_is_first(&a, &list));
+ KUNIT_EXPECT_FALSE(test, list_is_first(&b, &list));
+}
+
+static void list_test_list_is_last(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_FALSE(test, list_is_last(&a, &list));
+ KUNIT_EXPECT_TRUE(test, list_is_last(&b, &list));
+}
+
+static void list_test_list_empty(struct kunit *test)
+{
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty(&list2));
+}
+
+static void list_test_list_empty_careful(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_rotate_left(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_rotate_left(&list);
+ /* after: [list] -> b -> a */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_rotate_to_front(struct kunit *test)
+{
+ struct list_head a, b, c, d;
+ struct list_head *list_values[] = { &c, &d, &a, &b };
+ struct list_head *ptr;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+ list_add_tail(&c, &list);
+ list_add_tail(&d, &list);
+
+ /* before: [list] -> a -> b -> c -> d */
+ list_rotate_to_front(&c, &list);
+ /* after: [list] -> c -> d -> a -> b */
+
+ list_for_each(ptr, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+}
+
+static void list_test_list_is_singular(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ /* [list] empty */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+
+ list_add_tail(&a, &list);
+
+ /* [list] -> a */
+ KUNIT_EXPECT_TRUE(test, list_is_singular(&list));
+
+ list_add_tail(&b, &list);
+
+ /* [list] -> a -> b */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+}
+
+static void list_test_list_cut_position(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_position(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 2);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
+
+static void list_test_list_cut_before(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_before(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0], [list1] -> entries[1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
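
The two cut helpers exercised above differ only in whether the pivot entry travels with the cut: list_cut_position() moves everything up to and including the entry onto the destination list, while list_cut_before() stops just short of it. A minimal sketch of that distinction (the helper name and parameters are hypothetical, not part of the test file):

#include <linux/list.h>

/*
 * Split the leading part of @src into @dst around @pivot.  With
 * list_cut_before() the pivot stays behind as the new first element of
 * @src; with list_cut_position() it is moved into @dst as its last one.
 */
static void split_at(struct list_head *dst, struct list_head *src,
                     struct list_head *pivot, bool keep_pivot)
{
        if (keep_pivot)
                list_cut_before(dst, src, pivot);
        else
                list_cut_position(dst, src, pivot);
}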
+
+static void list_test_list_splice(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_tail(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_init(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_splice_tail_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail_init(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
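
The four splice variants above differ in where the donor list lands (head or tail of the target) and whether the donor head is reinitialised afterwards; only the *_init forms leave list2 reusable, which the final list_empty_careful() checks confirm. The _init variants back a common idiom, draining a shared list under a lock for local processing; a short sketch with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(pending);              /* shared producer list */
static DEFINE_SPINLOCK(pending_lock);   /* protects "pending" */

static void drain_pending(void)
{
        LIST_HEAD(local);
        struct list_head *cur, *n;

        spin_lock(&pending_lock);
        /* list_splice_init() also reinitialises "pending" for reuse. */
        list_splice_init(&pending, &local);
        spin_unlock(&pending_lock);

        list_for_each_safe(cur, n, &local) {
                list_del(cur);
                /* process the detached entry here */
        }
}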
+
+static void list_test_list_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct, list_entry(&(test_struct.list),
+ struct list_test_struct, list));
+}
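
list_entry() is container_of() under another name: it converts the embedded list_head pointer back into a pointer to the structure that contains it, which is why the expectation above compares against &test_struct itself. A one-function sketch of the equivalence (the helper name is hypothetical):

#include <linux/list.h>

static struct list_test_struct *from_node(struct list_head *node)
{
        /* Equivalent to container_of(node, struct list_test_struct, list). */
        return list_entry(node, struct list_test_struct, list);
}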
+
+static void list_test_list_entry_is_head(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2, test_struct3;
+
+ INIT_LIST_HEAD(&test_struct1.list);
+ INIT_LIST_HEAD(&test_struct3.list);
+
+ list_add_tail(&test_struct2.list, &test_struct1.list);
+
+ KUNIT_EXPECT_TRUE_MSG(test,
+ list_entry_is_head((&test_struct1), &test_struct1.list, list),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct2), &test_struct1.list, list),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct3), &test_struct1.list, list),
+ "Head element of different list");
+}
+
+static void list_test_list_first_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_first_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_last_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_last_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_first_entry_or_null(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ KUNIT_EXPECT_FALSE(test, list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1,
+ list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_next_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_next_entry(&test_struct1,
+ list));
+}
+
+static void list_test_list_prev_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_prev_entry(&test_struct2,
+ list));
+}
+
+static void list_test_list_for_each(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_for_each_prev(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void list_test_list_for_each_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 0;
+
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_prev_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_entry(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 0;
+
+ list_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_for_each_entry_reverse(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 4;
+
+ list_for_each_entry_reverse(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static struct kunit_case list_test_cases[] = {
+ KUNIT_CASE(list_test_list_init),
+ KUNIT_CASE(list_test_list_add),
+ KUNIT_CASE(list_test_list_add_tail),
+ KUNIT_CASE(list_test_list_del),
+ KUNIT_CASE(list_test_list_replace),
+ KUNIT_CASE(list_test_list_replace_init),
+ KUNIT_CASE(list_test_list_swap),
+ KUNIT_CASE(list_test_list_del_init),
+ KUNIT_CASE(list_test_list_del_init_careful),
+ KUNIT_CASE(list_test_list_move),
+ KUNIT_CASE(list_test_list_move_tail),
+ KUNIT_CASE(list_test_list_bulk_move_tail),
+ KUNIT_CASE(list_test_list_is_head),
+ KUNIT_CASE(list_test_list_is_first),
+ KUNIT_CASE(list_test_list_is_last),
+ KUNIT_CASE(list_test_list_empty),
+ KUNIT_CASE(list_test_list_empty_careful),
+ KUNIT_CASE(list_test_list_rotate_left),
+ KUNIT_CASE(list_test_list_rotate_to_front),
+ KUNIT_CASE(list_test_list_is_singular),
+ KUNIT_CASE(list_test_list_cut_position),
+ KUNIT_CASE(list_test_list_cut_before),
+ KUNIT_CASE(list_test_list_splice),
+ KUNIT_CASE(list_test_list_splice_tail),
+ KUNIT_CASE(list_test_list_splice_init),
+ KUNIT_CASE(list_test_list_splice_tail_init),
+ KUNIT_CASE(list_test_list_entry),
+ KUNIT_CASE(list_test_list_entry_is_head),
+ KUNIT_CASE(list_test_list_first_entry),
+ KUNIT_CASE(list_test_list_last_entry),
+ KUNIT_CASE(list_test_list_first_entry_or_null),
+ KUNIT_CASE(list_test_list_next_entry),
+ KUNIT_CASE(list_test_list_prev_entry),
+ KUNIT_CASE(list_test_list_for_each),
+ KUNIT_CASE(list_test_list_for_each_prev),
+ KUNIT_CASE(list_test_list_for_each_safe),
+ KUNIT_CASE(list_test_list_for_each_prev_safe),
+ KUNIT_CASE(list_test_list_for_each_entry),
+ KUNIT_CASE(list_test_list_for_each_entry_reverse),
+ {},
+};
+
+static struct kunit_suite list_test_module = {
+ .name = "list-kunit-test",
+ .test_cases = list_test_cases,
+};
+
+struct hlist_test_struct {
+ int data;
+ struct hlist_node list;
+};
+
+static void hlist_test_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct hlist_head list1 = HLIST_HEAD_INIT;
+ struct hlist_head list2;
+ HLIST_HEAD(list3);
+ struct hlist_head *list4;
+ struct hlist_head *list5;
+
+ INIT_HLIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_HLIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_HLIST_HEAD(list5);
+
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void hlist_test_unhashed(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+}
+
+/* Doesn't test concurrency guarantees */
+static void hlist_test_unhashed_lockless(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+}
+
+static void hlist_test_del(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+}
+
+static void hlist_test_del_init(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del_init(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+
+ /* a is now initialised */
+ KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
+}
+
+/* Tests all three hlist_add_* functions */
+static void hlist_test_add(struct kunit *test)
+{
+ struct hlist_node a, b, c, d;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_head(&b, &list);
+ hlist_add_before(&c, &a);
+ hlist_add_behind(&d, &a);
+
+ /* should be [list] -> b -> c -> a -> d */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+
+ KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
+
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
+ KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
+
+ KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
+}
+
+/* Tests both hlist_fake() and hlist_add_fake() */
+static void hlist_test_fake(struct kunit *test)
+{
+ struct hlist_node a;
+
+ INIT_HLIST_NODE(&a);
+
+ /* not fake after init */
+ KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
+
+ hlist_add_fake(&a);
+
+ /* is now fake */
+ KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
+}
+
+static void hlist_test_is_singular_node(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&a, &list);
+ KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&b, &list);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
+}
+
+static void hlist_test_empty(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ /* list starts off empty */
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+
+ hlist_add_head(&a, &list);
+
+ /* list is no longer empty */
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
+}
+
+static void hlist_test_move_list(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list1);
+ HLIST_HEAD(list2);
+
+ hlist_add_head(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ hlist_move_list(&list1, &list2);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
+
+}
+
+static void hlist_test_entry(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry(&(test_struct.list),
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry_safe(&(test_struct.list),
+ struct hlist_test_struct, list));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL,
+ hlist_entry_safe((struct hlist_node *)NULL,
+ struct hlist_test_struct, list));
+}
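
hlist_entry_safe() is the NULL-tolerant form used by the hlist_for_each_entry*() macros: when the node pointer is NULL (an empty bucket) it yields NULL instead of computing a bogus container pointer, which is what the second expectation above checks. A small sketch of the typical use (helper name hypothetical):

#include <linux/list.h>

/* First element of a bucket, or NULL when the bucket is empty. */
static struct hlist_test_struct *first_or_null(struct hlist_head *head)
{
        return hlist_entry_safe(head->first, struct hlist_test_struct, list);
}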
+
+static void hlist_test_for_each(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+
+static void hlist_test_for_each_safe(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur, *n;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ hlist_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static void hlist_test_for_each_entry(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void hlist_test_for_each_entry_continue(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ /* We skip the first (zero-th) entry. */
+ i = 1;
+
+ cur = &entries[0];
+ hlist_for_each_entry_continue(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was not visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 0);
+ /* The second (and presumably the others) were. */
+ KUNIT_EXPECT_EQ(test, entries[1].data, 42);
+}
+
+static void hlist_test_for_each_entry_from(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ cur = &entries[0];
+ hlist_for_each_entry_from(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 42);
+}
+
+static void hlist_test_for_each_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ struct hlist_node *tmp_node;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ hlist_del(&cur->list);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+
+static struct kunit_case hlist_test_cases[] = {
+ KUNIT_CASE(hlist_test_init),
+ KUNIT_CASE(hlist_test_unhashed),
+ KUNIT_CASE(hlist_test_unhashed_lockless),
+ KUNIT_CASE(hlist_test_del),
+ KUNIT_CASE(hlist_test_del_init),
+ KUNIT_CASE(hlist_test_add),
+ KUNIT_CASE(hlist_test_fake),
+ KUNIT_CASE(hlist_test_is_singular_node),
+ KUNIT_CASE(hlist_test_empty),
+ KUNIT_CASE(hlist_test_move_list),
+ KUNIT_CASE(hlist_test_entry),
+ KUNIT_CASE(hlist_test_entry_safe),
+ KUNIT_CASE(hlist_test_for_each),
+ KUNIT_CASE(hlist_test_for_each_safe),
+ KUNIT_CASE(hlist_test_for_each_entry),
+ KUNIT_CASE(hlist_test_for_each_entry_continue),
+ KUNIT_CASE(hlist_test_for_each_entry_from),
+ KUNIT_CASE(hlist_test_for_each_entry_safe),
+ {},
+};
+
+static struct kunit_suite hlist_test_module = {
+ .name = "hlist",
+ .test_cases = hlist_test_cases,
+};
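
hlist_head/hlist_node exist mainly for hash tables, where the single-pointer head halves the size of the bucket array compared with list_head; the hashtable helpers are thin wrappers over the primitives tested above. A minimal sketch of the usual bucket pattern (the object type, table name and size are hypothetical):

#include <linux/hashtable.h>

struct cached_item {
        int key;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(cache, 4);      /* 16 hlist_head buckets */

static void cache_insert(struct cached_item *item)
{
        /* hash_add() is hlist_add_head() on the bucket selected by the key. */
        hash_add(cache, &item->node, item->key);
}

static struct cached_item *cache_lookup(int key)
{
        struct cached_item *item;

        /* hash_for_each_possible() is hlist_for_each_entry() on one bucket. */
        hash_for_each_possible(cache, item, node, key)
                if (item->key == key)
                        return item;
        return NULL;
}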
+
+
+struct klist_test_struct {
+ int data;
+ struct klist klist;
+ struct klist_node klist_node;
+};
+
+static int node_count;
+static struct klist_node *last_node;
+
+static void check_node(struct klist_node *node_ptr)
+{
+ node_count++;
+ last_node = node_ptr;
+}
+
+static void check_delete_node(struct klist_node *node_ptr)
+{
+ node_count--;
+ last_node = node_ptr;
+}
+
+static void klist_test_add_tail(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_tail(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_tail(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> a -> b */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_head(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_head(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> b -> a */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_behind(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+
+ klist_add_behind(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_behind(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> b -> d -> a -> c*/
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_add_before(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+ klist_add_before(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_before(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> d -> b -> c -> a */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+/*
+ * Verify that klist_del() delays the deletion of a node until there
+ * are no other references to it
+ */
+static void klist_test_del_refcount_greater_than_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ /* Advance the iterator to point to node c*/
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+
+ /* Try to delete node c while there is a reference to it*/
+ klist_del(&c);
+
+ /*
+ * Verify that node c is still attached to the list even after being
+ * deleted. Since the iterator still points to c, the reference count is not
+ * decreased to 0
+ */
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&c));
+
+ /* Check that node c has not been removed yet*/
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_exit(&i);
+
+ /*
+ * Since the iterator is no longer pointing to node c, node c is removed
+ * from the list
+ */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+}
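
The check above works because klist is reference counted: klist_next() takes a reference on the node it returns, so klist_del() can only mark the node dead, and the put callback (check_delete_node() here) runs once the iterator drops its reference in klist_iter_exit(). A condensed sketch of the same sequence outside KUnit (the function name is hypothetical; assumes @n is on @k):

#include <linux/klist.h>

static void deferred_del(struct klist *k, struct klist_node *n)
{
        struct klist_iter iter;
        struct klist_node *pos;

        klist_iter_init(k, &iter);
        do {
                pos = klist_next(&iter);        /* holds a reference on pos */
        } while (pos && pos != n);

        klist_del(n);           /* n marked dead, but still attached */
        klist_iter_exit(&iter); /* last reference dropped, n removed now */
}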
+
+/*
+ * Verify that klist_del() deletes a node immediately when there are no
+ * other references to it.
+ */
+static void klist_test_del_refcount_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c*/
+ klist_del(&c);
+
+ /* Check that node c is deleted from the list*/
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* Should be [list] -> a -> b -> d*/
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_remove(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a,b,c,d to the list*/
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c*/
+ klist_remove(&c);
+
+ /* Check the nodes in the list*/
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* should be [list] -> a -> b -> d*/
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+
+}
+
+static void klist_test_node_attached(struct kunit *test)
+{
+ struct klist_node a = {};
+ struct klist mylist;
+
+ klist_init(&mylist, NULL, NULL);
+
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&a));
+ klist_del(&a);
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+
+}
+
+static struct kunit_case klist_test_cases[] = {
+ KUNIT_CASE(klist_test_add_tail),
+ KUNIT_CASE(klist_test_add_head),
+ KUNIT_CASE(klist_test_add_behind),
+ KUNIT_CASE(klist_test_add_before),
+ KUNIT_CASE(klist_test_del_refcount_greater_than_zero),
+ KUNIT_CASE(klist_test_del_refcount_zero),
+ KUNIT_CASE(klist_test_remove),
+ KUNIT_CASE(klist_test_node_attached),
+ {},
+};
+
+static struct kunit_suite klist_test_module = {
+ .name = "klist",
+ .test_cases = klist_test_cases,
+};
+
+kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 5d5424b51b74..db602417febf 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,7 +2,8 @@
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
- * This file contains the linked list validation for DEBUG_LIST.
+ * This file contains the linked list validation and error reporting for
+ * LIST_HARDENED and DEBUG_LIST.
*/
#include <linux/export.h>
@@ -17,10 +18,15 @@
* attempt).
*/
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(next->prev != prev,
+ if (CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_add corruption. prev is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next == NULL,
+ "list_add corruption. next is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next->prev != prev,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
@@ -33,30 +39,34 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
return true;
}
-EXPORT_SYMBOL(__list_add_valid);
+EXPORT_SYMBOL(__list_add_valid_or_report);
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ if (CHECK_DATA_CORRUPTION(next == NULL,
+ "list_del corruption, %px->next is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_del corruption, %px->prev is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
CHECK_DATA_CORRUPTION(prev->next != entry,
- "list_del corruption. prev->next should be %px, but was %px\n",
- entry, prev->next) ||
+ "list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
+ entry, prev->next, prev) ||
CHECK_DATA_CORRUPTION(next->prev != entry,
- "list_del corruption. next->prev should be %px, but was %px\n",
- entry, next->prev))
+ "list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
+ entry, next->prev, next))
return false;
return true;
-
}
-EXPORT_SYMBOL(__list_del_entry_valid);
+EXPORT_SYMBOL(__list_del_entry_valid_or_report);
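
The renamed *_or_report() functions are the out-of-line slow path; the list primitives only perform their pointer updates when the validator returns true, so a corrupted neighbour aborts the operation after the report. A simplified sketch of how a caller is structured (not the exact inline from include/linux/list.h, which also carries the cheaper CONFIG_LIST_HARDENED inline checks):

#include <linux/list.h>

/* Sketch only: gate the link-in on the validator's verdict. */
static inline void sketch_list_add(struct list_head *new,
                                   struct list_head *prev,
                                   struct list_head *next)
{
        if (!__list_add_valid_or_report(new, prev, next))
                return;         /* corruption reported, list left untouched */

        next->prev = new;
        new->next = next;
        new->prev = prev;
        WRITE_ONCE(prev->next, new);
}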
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 712ed1f4eb64..0fb59e92ca2d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -7,16 +7,13 @@
#include <linux/list_sort.h>
#include <linux/list.h>
-typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
- struct list_head const *, struct list_head const *);
-
/*
* Returns a list organized in an intermediate format suited
* to chaining of merge() calls: null-terminated, no reserved or
* sentinel head node, "prev" links not maintained.
*/
__attribute__((nonnull(2,3,4)))
-static struct list_head *merge(void *priv, cmp_func cmp,
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
struct list_head *a, struct list_head *b)
{
struct list_head *head, **tail = &head;
@@ -52,7 +49,7 @@ static struct list_head *merge(void *priv, cmp_func cmp,
* throughout.
*/
__attribute__((nonnull(2,3,4,5)))
-static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
@@ -107,7 +104,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
* @head: the list to sort
* @cmp: the elements comparison function
*
- * The comparison funtion @cmp must return > 0 if @a should sort after
+ * The comparison function @cmp must return > 0 if @a should sort after
* @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
* sort before @b *or* their original order should be preserved. It is
* always called with the element that came first in the input in @a,
@@ -140,7 +137,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
*
*
* The merging is controlled by "count", the number of elements in the
- * pending lists. This is beautiully simple code, but rather subtle.
+ * pending lists. This is beautifully simple code, but rather subtle.
*
* Each time we increment "count", we set one bit (bit k) and clear
* bits k-1 .. 0. Each time this happens (except the very first time
@@ -157,9 +154,11 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
*
* The number of pending lists of size 2^k is determined by the
* state of bit k of "count" plus two extra pieces of information:
+ *
* - The state of bit k-1 (when k == 0, consider bit -1 always set), and
* - Whether the higher-order bits are zero or non-zero (i.e.
* is count >= 2^(k+1)).
+ *
* There are six states we distinguish. "x" represents some arbitrary
* bits, and "y" represents some arbitrary non-zero bits:
* 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
@@ -183,9 +182,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
* 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
*/
__attribute__((nonnull(2,3)))
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
struct list_head *list = head->next, *pending = NULL;
size_t count = 0; /* Count of pending */
@@ -225,7 +222,7 @@ void list_sort(void *priv, struct list_head *head,
if (likely(bits)) {
struct list_head *a = *tail, *b = a->prev;
- a = merge(priv, (cmp_func)cmp, b, a);
+ a = merge(priv, cmp, b, a);
/* Install the merged result in place of the inputs */
a->prev = b->prev;
*tail = a;
@@ -247,10 +244,10 @@ void list_sort(void *priv, struct list_head *head,
if (!next)
break;
- list = merge(priv, (cmp_func)cmp, pending, list);
+ list = merge(priv, cmp, pending, list);
pending = next;
}
/* The final merge, rebuilding prev links */
- merge_final(priv, (cmp_func)cmp, head, pending, list);
+ merge_final(priv, cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
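
With the typed list_cmp_func_t, comparators are written against const struct list_head pointers and no longer need the old cmp_func cast. Per the kerneldoc above, the callback returns > 0 to sort @a after @b and <= 0 to keep their order (the sort is stable). A minimal caller sketch; the element type and function names are hypothetical:

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
        int prio;
        struct list_head node;
};

/* > 0: a sorts after b; <= 0: keep order => ascending, stable sort. */
static int cmp_prio(void *priv, const struct list_head *a,
                    const struct list_head *b)
{
        const struct item *ia = list_entry(a, struct item, node);
        const struct item *ib = list_entry(b, struct item, node);

        if (ia->prio != ib->prio)
                return ia->prio > ib->prio ? 1 : -1;
        return 0;
}

static void sort_items(struct list_head *items)
{
        list_sort(NULL, items, cmp_prio);
}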
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
index 26900ddaef82..dcc912b3478f 100644
--- a/lib/livepatch/Makefile
+++ b/lib/livepatch/Makefile
@@ -8,8 +8,7 @@ obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
test_klp_callbacks_busy.o \
test_klp_callbacks_mod.o \
test_klp_livepatch.o \
- test_klp_shadow_vars.o
-
-# Target modules to be livepatched require CC_FLAGS_FTRACE
-CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE)
-CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE)
+ test_klp_shadow_vars.o \
+ test_klp_state.o \
+ test_klp_state2.o \
+ test_klp_state3.o
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
index 40beddf8a0e2..133929e0ce8f 100644
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -5,34 +5,61 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
-static int sleep_secs;
-module_param(sleep_secs, int, 0644);
-MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+/* load/run-time control from sysfs writer */
+static bool block_transition;
+module_param(block_transition, bool, 0644);
+MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
static void busymod_work_func(struct work_struct *work);
-static DECLARE_DELAYED_WORK(work, busymod_work_func);
+static DECLARE_WORK(work, busymod_work_func);
+static DECLARE_COMPLETION(busymod_work_started);
static void busymod_work_func(struct work_struct *work)
{
- pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
- msleep(sleep_secs * 1000);
+ pr_info("%s enter\n", __func__);
+ complete(&busymod_work_started);
+
+ while (READ_ONCE(block_transition)) {
+ /*
+ * Busy-wait until the sysfs writer has acknowledged a
+ * blocked transition and clears the flag.
+ */
+ msleep(20);
+ }
+
pr_info("%s exit\n", __func__);
}
static int test_klp_callbacks_busy_init(void)
{
pr_info("%s\n", __func__);
- schedule_delayed_work(&work,
- msecs_to_jiffies(1000 * 0));
+ schedule_work(&work);
+
+ /*
+ * To synchronize kernel messages, hold the init function from
+ * exiting until the work function's entry message has printed.
+ */
+ wait_for_completion(&busymod_work_started);
+
+ if (!block_transition) {
+ /*
+ * Serialize output: print all messages from the work
+ * function before returning from init().
+ */
+ flush_work(&work);
+ }
+
return 0;
}
static void test_klp_callbacks_busy_exit(void)
{
- cancel_delayed_work_sync(&work);
+ WRITE_ONCE(block_transition, false);
+ flush_work(&work);
pr_info("%s\n", __func__);
}
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
index fe5c413efe96..b99116490858 100644
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ b/lib/livepatch/test_klp_shadow_vars.c
@@ -60,36 +60,43 @@ static int ptr_id(void *ptr)
*/
static void *shadow_get(void *obj, unsigned long id)
{
- void *ret = klp_shadow_get(obj, id);
+ int **sv;
+ sv = klp_shadow_get(obj, id);
pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
- __func__, ptr_id(obj), id, ptr_id(ret));
+ __func__, ptr_id(obj), id, ptr_id(sv));
- return ret;
+ return sv;
}
static void *shadow_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
- void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
- ctor_data);
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(ctor_data), ptr_id(ret));
- return ret;
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
}
static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
- void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
- ctor_data);
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(ctor_data), ptr_id(ret));
- return ret;
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
}
static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
@@ -102,149 +109,185 @@ static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n",
- __func__, id, ptr_id(dtor));
+ pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
}
/* Shadow variable constructor - remember simple pointer data */
static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
- int **shadow_int = shadow_data;
- *shadow_int = ctor_data;
- pr_info("%s: PTR%d -> PTR%d\n",
- __func__, ptr_id(shadow_int), ptr_id(ctor_data));
+ int **sv = shadow_data;
+ int **var = ctor_data;
+
+ if (!var)
+ return -EINVAL;
+
+ *sv = *var;
+ pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
return 0;
}
+/*
+ * With more than one item to free in the list, the freeing order is not
+ * deterministic, so shadow_dtor is not passed to shadow_free_all(); its
+ * unordered output would otherwise make the test fail. (see pass 6)
+ */
static void shadow_dtor(void *obj, void *shadow_data)
{
+ int **sv = shadow_data;
+
pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
- __func__, ptr_id(obj), ptr_id(shadow_data));
+ __func__, ptr_id(obj), ptr_id(sv));
}
-static int test_klp_shadow_vars_init(void)
-{
- void *obj = THIS_MODULE;
- int id = 0x1234;
- size_t size = sizeof(int *);
- gfp_t gfp_flags = GFP_KERNEL;
+/* number of objects we simulate that need shadow vars */
+#define NUM_OBJS 3
- int var1, var2, var3, var4;
- int **sv1, **sv2, **sv3, **sv4;
+/* dynamically created obj fields have the following shadow var id values */
+#define SV_ID1 0x1234
+#define SV_ID2 0x1235
- void *ret;
+/*
+ * The main test case adds/removes new fields (shadow var) to each of these
+ * test structure instances. The last group of fields in the struct represents
+ * the idea that shadow variables may be added to and removed from the
+ * struct during execution.
+ */
+struct test_object {
+ /* add anything here below to avoid defining an empty struct */
+ struct shadow_ptr sp;
+
+ /* these represent shadow vars added and removed with SV_ID{1,2} */
+ /* char nfield1; */
+ /* int nfield2; */
+};
+
+static int test_klp_shadow_vars_init(void)
+{
+ struct test_object objs[NUM_OBJS];
+ char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
+ char *pndup[NUM_OBJS];
+ int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
+ void **sv;
+ int ret;
+ int i;
ptr_id(NULL);
- ptr_id(&var1);
- ptr_id(&var2);
- ptr_id(&var3);
- ptr_id(&var4);
/*
* With an empty shadow variable hash table, expect not to find
* any matches.
*/
- ret = shadow_get(obj, id);
- if (!ret)
+ sv = shadow_get(&objs[0], SV_ID1);
+ if (!sv)
pr_info(" got expected NULL result\n");
- /*
- * Allocate a few shadow variables with different <obj> and <id>.
- */
- sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
- if (!sv1)
- return -ENOMEM;
-
- sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
- if (!sv2)
- return -ENOMEM;
-
- sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
- if (!sv3)
- return -ENOMEM;
-
- /*
- * Verify we can find our new shadow variables and that they point
- * to expected data.
- */
- ret = shadow_get(obj, id);
- if (!ret)
- return -EINVAL;
- if (ret == sv1 && *sv1 == &var1)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1), ptr_id(*sv1));
-
- ret = shadow_get(obj + 1, id);
- if (!ret)
- return -EINVAL;
- if (ret == sv2 && *sv2 == &var2)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2), ptr_id(*sv2));
- ret = shadow_get(obj, id + 1);
- if (!ret)
- return -EINVAL;
- if (ret == sv3 && *sv3 == &var3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
-
- /*
- * Allocate or get a few more, this time with the same <obj>, <id>.
- * The second invocation should return the same shadow var.
- */
- sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
- if (!sv4)
- return -ENOMEM;
-
- ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
- if (!ret)
- return -EINVAL;
- if (ret == sv4 && *sv4 == &var4)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv4), ptr_id(*sv4));
-
- /*
- * Free the <obj=*, id> shadow variables and check that we can no
- * longer find them.
- */
- shadow_free(obj, id, shadow_dtor); /* sv1 */
- ret = shadow_get(obj, id);
- if (!ret)
- pr_info(" got expected NULL result\n");
+ /* pass 1: init & alloc a char+int pair of svars for each objs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pnfields1[i] = &nfields1[i];
+ ptr_id(pnfields1[i]);
+
+ if (i % 2) {
+ sv1[i] = shadow_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ } else {
+ sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ }
+ if (!sv1[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnfields2[i] = &nfields2[i];
+ ptr_id(pnfields2[i]);
+ sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
+ GFP_KERNEL, shadow_ctor, &pnfields2[i]);
+ if (!sv2[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
- shadow_free(obj + 1, id, shadow_dtor); /* sv2 */
- ret = shadow_get(obj + 1, id);
- if (!ret)
- pr_info(" got expected NULL result\n");
+ /* pass 2: verify we find allocated svars and where they point to */
+ for (i = 0; i < NUM_OBJS; i++) {
+ /* check the "char" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+
+ /* check the "int" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
- shadow_free(obj + 2, id, shadow_dtor); /* sv4 */
- ret = shadow_get(obj + 2, id);
- if (!ret)
- pr_info(" got expected NULL result\n");
+ /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pndup[i] = &nfields1[i];
+ ptr_id(pndup[i]);
+
+ sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
+ GFP_KERNEL, shadow_ctor, &pndup[i]);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+ }
- /*
- * We should still find an <id+1> variable.
- */
- ret = shadow_get(obj, id + 1);
- if (!ret)
- return -EINVAL;
- if (ret == sv3 && *sv3 == &var3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
+ /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
+ for (i = 0; i < NUM_OBJS; i++) {
+ shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
- /*
- * Free all the <id+1> variables, too.
- */
- shadow_free_all(id + 1, shadow_dtor); /* sv3 */
- ret = shadow_get(obj, id);
- if (!ret)
- pr_info(" shadow_get() got expected NULL result\n");
+ /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
+ /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
free_ptr_list();
return 0;
+out:
+ shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ free_ptr_list();
+
+ return ret;
}
static void test_klp_shadow_vars_exit(void)
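
Outside the test, the klp_shadow_* calls wrapped above are how a livepatch attaches a new "field" to a structure whose layout it cannot change: the ctor fills in the freshly allocated shadow data, get_or_alloc makes attachment idempotent, and free detaches it again. A minimal sketch with hypothetical names (the id value and counter semantics are invented for illustration):

#include <linux/livepatch.h>
#include <linux/slab.h>

#define SV_COUNTER 0x2020               /* hypothetical shadow variable id */

static int counter_ctor(void *obj, void *shadow_data, void *ctor_data)
{
        *(int *)shadow_data = ctor_data ? *(int *)ctor_data : 0;
        return 0;
}

/* Attach an int "field" to @obj without touching its struct definition. */
static int *get_counter(void *obj)
{
        int start = 0;

        return klp_shadow_get_or_alloc(obj, SV_COUNTER, sizeof(int),
                                       GFP_KERNEL, counter_ctor, &start);
}

static void drop_counter(void *obj)
{
        klp_shadow_free(obj, SV_COUNTER, NULL); /* plain int, no dtor needed */
}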
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
new file mode 100644
index 000000000000..57a4253acb01
--- /dev/null
+++ b/lib/livepatch/test_klp_state.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 1 does not support migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 1
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
new file mode 100644
index 000000000000..c978ea4d5e67
--- /dev/null
+++ b/lib/livepatch/test_klp_state2.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 2 supports migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 2
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: space to store console_loglevel already allocated\n",
+ __func__);
+ return 0;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: taking over the console_loglevel change\n",
+ __func__);
+ loglevel_state->data = prev_loglevel_state->data;
+ return;
+ }
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: keeping space to store console_loglevel\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
new file mode 100644
index 000000000000..9226579d10c5
--- /dev/null
+++ b/lib/livepatch/test_klp_state3.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+/* The console loglevel fix is the same in the next cumulative patch. */
+#include "test_klp_state2.c"
diff --git a/lib/llist.c b/lib/llist.c
index 611ce4881a87..f21d0cfbbaaa 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -26,11 +26,11 @@
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_head *head)
{
- struct llist_node *first;
+ struct llist_node *first = READ_ONCE(head->first);
do {
- new_last->next = first = READ_ONCE(head->first);
- } while (cmpxchg(&head->first, first, new_first) != first);
+ new_last->next = first;
+ } while (!try_cmpxchg(&head->first, &first, new_first));
return !first;
}
@@ -52,24 +52,48 @@ EXPORT_SYMBOL_GPL(llist_add_batch);
*/
struct llist_node *llist_del_first(struct llist_head *head)
{
- struct llist_node *entry, *old_entry, *next;
+ struct llist_node *entry, *next;
entry = smp_load_acquire(&head->first);
- for (;;) {
+ do {
if (entry == NULL)
return NULL;
- old_entry = entry;
next = READ_ONCE(entry->next);
- entry = cmpxchg(&head->first, old_entry, next);
- if (entry == old_entry)
- break;
- }
+ } while (!try_cmpxchg(&head->first, &entry, next));
return entry;
}
EXPORT_SYMBOL_GPL(llist_del_first);
/**
+ * llist_del_first_this - delete given entry of lock-less list if it is first
+ * @head: the head for your lock-less list
+ * @this: a list entry.
+ *
+ * If the first entry of the list is @this, delete it and return %true;
+ * otherwise return %false.
+ *
+ * Multiple callers can safely call this concurrently with multiple
+ * llist_add() callers, provided that every caller passes a different @this.
+ */
+bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this)
+{
+ struct llist_node *entry, *next;
+
+ /* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */
+ entry = smp_load_acquire(&head->first);
+ do {
+ if (entry != this)
+ return false;
+ next = READ_ONCE(entry->next);
+ } while (!try_cmpxchg(&head->first, &entry, next));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(llist_del_first_this);
+
+/**
* llist_reverse_order - reverse order of a llist chain
* @head: first item of the list to be reversed
*
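A minimal usage sketch for llist_del_first_this(): concurrent removers are safe as long as each passes its own node. The structure and helper below are hypothetical, not taken from the patch.

struct idle_worker {
	struct llist_node idle_node;
	/* ... per-worker state ... */
};

/* Returns true only if @w was still at the head of the idle list. */
static bool worker_claim_self(struct llist_head *idle_list,
			      struct idle_worker *w)
{
	return llist_del_first_this(idle_list, &w->idle_node);
}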
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a1705545e6ac..6f6a5fc85b42 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
@@ -23,11 +24,19 @@
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
+#include <linux/local_lock.h>
+
+#ifdef CONFIG_PREEMPT_RT
+# define NON_RT(...)
+#else
+# define NON_RT(...) __VA_ARGS__
+#endif
/*
* Change this to 1 if you want to see the failure printouts:
*/
static unsigned int debug_locks_verbose;
+unsigned int force_read_lock_recursive;
static DEFINE_WD_CLASS(ww_lockdep);
@@ -49,6 +58,8 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
+#define LOCKTYPE_LL 0x40
+#define LOCKTYPE_SPECIAL 0x80
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -57,10 +68,13 @@ static struct ww_mutex o, o2, o3;
* Normal standalone locks, for the circular and irq-context
* dependency tests:
*/
-static DEFINE_RAW_SPINLOCK(lock_A);
-static DEFINE_RAW_SPINLOCK(lock_B);
-static DEFINE_RAW_SPINLOCK(lock_C);
-static DEFINE_RAW_SPINLOCK(lock_D);
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
@@ -92,12 +106,12 @@ static DEFINE_RT_MUTEX(rtmutex_D);
* but X* and Y* are different classes. We do this so that
* we do not trigger a real lockup:
*/
-static DEFINE_RAW_SPINLOCK(lock_X1);
-static DEFINE_RAW_SPINLOCK(lock_X2);
-static DEFINE_RAW_SPINLOCK(lock_Y1);
-static DEFINE_RAW_SPINLOCK(lock_Y2);
-static DEFINE_RAW_SPINLOCK(lock_Z1);
-static DEFINE_RAW_SPINLOCK(lock_Z2);
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
static DEFINE_RWLOCK(rwlock_X1);
static DEFINE_RWLOCK(rwlock_X2);
@@ -131,16 +145,18 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
+static DEFINE_PER_CPU(local_lock_t, local_A);
+
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
*/
#define INIT_CLASS_FUNC(class) \
static noinline void \
-init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
struct mutex *mutex, struct rw_semaphore *rwsem)\
{ \
- raw_spin_lock_init(lock); \
+ spin_lock_init(lock); \
rwlock_init(rwlock); \
mutex_init(mutex); \
init_rwsem(rwsem); \
@@ -185,6 +201,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
__irq_enter(); \
+ lockdep_hardirq_threaded(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
@@ -209,10 +226,10 @@ static void init_shared_classes(void)
* Shortcuts for lock/unlock API variants, to keep
* the testcases compact:
*/
-#define L(x) raw_spin_lock(&lock_##x)
-#define U(x) raw_spin_unlock(&lock_##x)
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
#define LU(x) L(x); U(x)
-#define SI(x) raw_spin_lock_init(&lock_##x)
+#define SI(x) spin_lock_init(&lock_##x)
#define WL(x) write_lock(&rwlock_##x)
#define WU(x) write_unlock(&rwlock_##x)
@@ -247,7 +264,7 @@ static void init_shared_classes(void)
#define WWAF(x) ww_acquire_fini(x)
#define WWL(x, c) ww_mutex_lock(x, c)
-#define WWT(x) ww_mutex_trylock(x)
+#define WWT(x) ww_mutex_trylock(x, NULL)
#define WWL1(x) ww_mutex_lock(x, NULL)
#define WWU(x) ww_mutex_unlock(x)
@@ -399,6 +416,49 @@ static void rwsem_ABBA1(void)
* read_lock(A)
* spin_lock(B)
* spin_lock(B)
+ * write_lock(A)
+ *
+ * This test case is aimed at checking whether the chain cache prevents us from
+ * detecting a read-lock/lock-write deadlock: if the chain cache doesn't
+ * distinguish read and write locks, the following case may happen
+ *
+ * { read_lock(A)->lock(B) dependency exists }
+ *
+ * P0:
+ * lock(B);
+ * read_lock(A);
+ *
+ * { Not a deadlock, B -> A is added in the chain cache }
+ *
+ * P1:
+ * lock(B);
+ * write_lock(A);
+ *
+ * { B->A found in chain cache, not reported as a deadlock }
+ *
+ */
+static void rlock_chaincache_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ * spin_lock(B)
* read_lock(A)
*/
static void rlock_ABBA2(void)
@@ -658,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
#undef E
+#ifdef CONFIG_PREEMPT_RT
+# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
+#else
+# define RT_PREPARE_DBL_UNLOCK()
+#endif
/*
* Double unlock:
*/
#define E() \
\
LOCK(A); \
+ RT_PREPARE_DBL_UNLOCK(); \
UNLOCK(A); \
UNLOCK(A); /* fail */
@@ -748,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
@@ -756,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+#endif
#undef E1
#undef E2
+#ifndef CONFIG_PREEMPT_RT
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
@@ -792,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2
+#endif
+
/*
* Enabling irqs with an irq-safe lock held:
*/
@@ -821,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
@@ -829,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+#endif
#undef E1
#undef E2
@@ -867,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
@@ -875,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+#endif
#undef E1
#undef E2
@@ -915,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
@@ -923,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+#endif
#undef E1
#undef E2
@@ -977,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
@@ -985,12 +1063,140 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-read / write-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ WL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ WU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_W2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / read-write is not deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ RL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ RU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_R2R3_W3W1)
#undef E1
#undef E2
#undef E3
/*
+ * write-read / read-read / write-write is not deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
+
+#undef E1
+#undef E2
+#undef E3
+/*
* read-lock / write-lock recursion that is actually safe.
*/
@@ -1009,20 +1215,30 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#define E3() \
\
IRQ_ENTER(); \
- RL(A); \
+ LOCK(A); \
L(B); \
U(B); \
- RU(A); \
+ UNLOCK(A); \
IRQ_EXIT();
/*
- * Generate 12 testcases:
+ * Generate 24 testcases:
*/
#include "locking-selftest-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+#endif
#undef E1
#undef E2
@@ -1036,8 +1252,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
\
IRQ_DISABLE(); \
L(B); \
- WL(A); \
- WU(A); \
+ LOCK(A); \
+ UNLOCK(A); \
U(B); \
IRQ_ENABLE();
@@ -1054,29 +1270,99 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
IRQ_EXIT();
/*
- * Generate 12 testcases:
+ * Generate 24 testcases:
*/
#include "locking-selftest-hardirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
+#ifndef CONFIG_PREEMPT_RT
#include "locking-selftest-softirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ *
+ * A is a ENABLED_*_READ lock
+ * B is a USED_IN_*_READ lock
+ *
+ * read_lock(A);
+ * write_lock(B);
+ * <interrupt>
+ * read_lock(B);
+ * write_lock(A); // if this one is read_lock(), no deadlock
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(B); \
+ LOCK(A); \
+ UNLOCK(A); \
+ WU(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(B); \
+ RU(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-softirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
# define I_WW(x)
+# define I_LOCAL_LOCK(x)
#endif
#ifndef I_RTMUTEX
@@ -1100,7 +1386,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#define I2(x) \
do { \
- raw_spin_lock_init(&lock_##x); \
+ spin_lock_init(&lock_##x); \
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
@@ -1116,9 +1402,16 @@ static void reset_locks(void)
I1(A); I1(B); I1(C); I1(D);
I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+ I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+ I_LOCAL_LOCK(A);
+
lockdep_reset();
+
I2(A); I2(B); I2(C); I2(D);
init_shared_classes();
+ raw_spin_lock_init(&raw_lock_A);
+ raw_spin_lock_init(&raw_lock_B);
+ local_lock_init(this_cpu_ptr(&local_A));
ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1136,10 +1429,18 @@ static int unexpected_testcase_failures;
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
- unsigned long saved_preempt_count = preempt_count();
+ int saved_preempt_count = preempt_count();
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+ int saved_mgd_count = current->migration_disabled;
+#endif
+ int saved_rcu_count = current->rcu_read_lock_nesting;
+#endif
WARN_ON(irqs_disabled());
+ debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
testcase_fn();
/*
* Filter out expected failures:
@@ -1160,7 +1461,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
}
testcase_total++;
- if (debug_locks_verbose)
+ if (debug_locks_verbose & lockclass_mask)
pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
lockclass_mask, debug_locks, expected);
/*
@@ -1168,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
* count, so restore it:
*/
preempt_count_set(saved_preempt_count);
+
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+ while (current->migration_disabled > saved_mgd_count)
+ migrate_enable();
+#endif
+
+ while (current->rcu_read_lock_nesting > saved_rcu_count)
+ rcu_read_unlock();
+ WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
if (softirq_count())
current->softirqs_enabled = 0;
@@ -1199,6 +1512,19 @@ static inline void print_testname(const char *testname)
dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
pr_cont("\n");
+#define DO_TESTCASE_1RR(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_1RRB(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+
#define DO_TESTCASE_3(desc, name, nr) \
print_testname(desc"/"#nr); \
dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
@@ -1213,6 +1539,25 @@ static inline void print_testname(const char *testname)
dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
pr_cont("\n");
+#define DO_TESTCASE_2RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_2x2RW(desc, name, nr) \
+ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
+ NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
+
+#define DO_TESTCASE_6x2x2RW(desc, name) \
+ DO_TESTCASE_2x2RW(desc, name, 123); \
+ DO_TESTCASE_2x2RW(desc, name, 132); \
+ DO_TESTCASE_2x2RW(desc, name, 213); \
+ DO_TESTCASE_2x2RW(desc, name, 231); \
+ DO_TESTCASE_2x2RW(desc, name, 312); \
+ DO_TESTCASE_2x2RW(desc, name, 321);
+
#define DO_TESTCASE_6(desc, name) \
print_testname(desc); \
dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
@@ -1251,19 +1596,19 @@ static inline void print_testname(const char *testname)
#define DO_TESTCASE_2I(desc, name, nr) \
DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+ NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_2IB(desc, name, nr) \
DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+ NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_6I(desc, name, nr) \
DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+ NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_6IRW(desc, name, nr) \
DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+ NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
#define DO_TESTCASE_2x3(desc, name) \
DO_TESTCASE_3(desc, name, 12); \
@@ -1289,6 +1634,22 @@ static inline void print_testname(const char *testname)
DO_TESTCASE_2IB(desc, name, 312); \
DO_TESTCASE_2IB(desc, name, 321);
+#define DO_TESTCASE_6x1RR(desc, name) \
+ DO_TESTCASE_1RR(desc, name, 123); \
+ DO_TESTCASE_1RR(desc, name, 132); \
+ DO_TESTCASE_1RR(desc, name, 213); \
+ DO_TESTCASE_1RR(desc, name, 231); \
+ DO_TESTCASE_1RR(desc, name, 312); \
+ DO_TESTCASE_1RR(desc, name, 321);
+
+#define DO_TESTCASE_6x1RRB(desc, name) \
+ DO_TESTCASE_1RRB(desc, name, 123); \
+ DO_TESTCASE_1RRB(desc, name, 132); \
+ DO_TESTCASE_1RRB(desc, name, 213); \
+ DO_TESTCASE_1RRB(desc, name, 231); \
+ DO_TESTCASE_1RRB(desc, name, 312); \
+ DO_TESTCASE_1RRB(desc, name, 321);
+
#define DO_TESTCASE_6x6(desc, name) \
DO_TESTCASE_6I(desc, name, 123); \
DO_TESTCASE_6I(desc, name, 132); \
@@ -1339,6 +1700,22 @@ static void ww_test_fail_acquire(void)
#endif
}
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
+#define ww_mutex_base_trylock(b) rt_mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
+#else
+#define ww_mutex_base_lock(b) mutex_lock(b)
+#define ww_mutex_base_trylock(b) mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b) mutex_unlock(b)
+#endif
+
static void ww_test_normal(void)
{
int ret;
@@ -1353,50 +1730,50 @@ static void ww_test_normal(void)
/* mutex_lock (and indirectly, mutex_lock_nested) */
o.ctx = (void *)~0UL;
- mutex_lock(&o.base);
- mutex_unlock(&o.base);
+ ww_mutex_base_lock(&o.base);
+ ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
/* mutex_lock_interruptible (and *_nested) */
o.ctx = (void *)~0UL;
- ret = mutex_lock_interruptible(&o.base);
+ ret = ww_mutex_base_lock_interruptible(&o.base);
if (!ret)
- mutex_unlock(&o.base);
+ ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* mutex_lock_killable (and *_nested) */
o.ctx = (void *)~0UL;
- ret = mutex_lock_killable(&o.base);
+ ret = ww_mutex_base_lock_killable(&o.base);
if (!ret)
- mutex_unlock(&o.base);
+ ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* trylock, succeeding */
o.ctx = (void *)~0UL;
- ret = mutex_trylock(&o.base);
+ ret = ww_mutex_base_trylock(&o.base);
WARN_ON(!ret);
if (ret)
- mutex_unlock(&o.base);
+ ww_mutex_base_unlock(&o.base);
else
WARN_ON(1);
WARN_ON(o.ctx != (void *)~0UL);
/* trylock, failing */
o.ctx = (void *)~0UL;
- mutex_lock(&o.base);
- ret = mutex_trylock(&o.base);
+ ww_mutex_base_lock(&o.base);
+ ret = ww_mutex_base_trylock(&o.base);
WARN_ON(ret);
- mutex_unlock(&o.base);
+ ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
/* nest_lock */
o.ctx = (void *)~0UL;
- mutex_lock_nest_lock(&o.base, &t);
- mutex_unlock(&o.base);
+ ww_mutex_base_lock_nest_lock(&o.base, &t);
+ ww_mutex_base_unlock(&o.base);
WARN_ON(o.ctx != (void *)~0UL);
}
@@ -1409,7 +1786,7 @@ static void ww_test_two_contexts(void)
static void ww_test_diff_class(void)
{
WWAI(&t);
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
t.ww_class = NULL;
#endif
WWL(&o, &t);
@@ -1473,9 +1850,9 @@ static void ww_test_edeadlk_normal(void)
{
int ret;
- mutex_lock(&o2.base);
+ ww_mutex_base_lock(&o2.base);
o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
WWAI(&t);
t2 = t;
@@ -1489,7 +1866,7 @@ static void ww_test_edeadlk_normal(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
- mutex_unlock(&o2.base);
+ ww_mutex_base_unlock(&o2.base);
WWU(&o);
WWL(&o2, &t);
@@ -1499,8 +1876,8 @@ static void ww_test_edeadlk_normal_slow(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1515,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
- mutex_unlock(&o2.base);
+ ww_mutex_base_unlock(&o2.base);
WWU(&o);
ww_mutex_lock_slow(&o2, &t);
@@ -1525,9 +1902,9 @@ static void ww_test_edeadlk_no_unlock(void)
{
int ret;
- mutex_lock(&o2.base);
+ ww_mutex_base_lock(&o2.base);
o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
WWAI(&t);
t2 = t;
@@ -1541,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
- mutex_unlock(&o2.base);
+ ww_mutex_base_unlock(&o2.base);
WWL(&o2, &t);
}
@@ -1550,8 +1927,8 @@ static void ww_test_edeadlk_no_unlock_slow(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1566,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
o2.ctx = NULL;
mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
- mutex_unlock(&o2.base);
+ ww_mutex_base_unlock(&o2.base);
ww_mutex_lock_slow(&o2, &t);
}
@@ -1575,8 +1952,8 @@ static void ww_test_edeadlk_acquire_more(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1596,8 +1973,8 @@ static void ww_test_edeadlk_acquire_more_slow(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1617,12 +1994,12 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
- mutex_lock(&o3.base);
- mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
WWAI(&t);
@@ -1643,12 +2020,12 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
- mutex_lock(&o3.base);
- mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
WWAI(&t);
@@ -1668,8 +2045,8 @@ static void ww_test_edeadlk_acquire_wrong(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1693,8 +2070,8 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
{
int ret;
- mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1716,10 +2093,23 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
static void ww_test_spin_nest_unlocked(void)
{
- raw_spin_lock_nest_lock(&lock_A, &o.base);
+ spin_lock_nest_lock(&lock_A, &o.base);
U(A);
}
+/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
+static void ww_test_spin_nest_lock(void)
+{
+ spin_lock(&lock_X1);
+ spin_lock_nest_lock(&lock_Y1, &lock_X1);
+ spin_lock(&lock_A);
+ spin_lock_nest_lock(&lock_Y2, &lock_X1);
+ spin_unlock(&lock_A);
+ spin_unlock(&lock_Y2);
+ spin_unlock(&lock_Y1);
+ spin_unlock(&lock_X1);
+}
+
static void ww_test_unneeded_slow(void)
{
WWAI(&t);
@@ -1937,6 +2327,10 @@ static void ww_tests(void)
dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
pr_cont("\n");
+ print_testname("spinlock nest test");
+ dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
+ pr_cont("\n");
+
printk(" -----------------------------------------------------\n");
printk(" |block | try |context|\n");
printk(" -----------------------------------------------------\n");
@@ -1966,6 +2360,435 @@ static void ww_tests(void)
pr_cont("\n");
}
+
+/*
+ * <in hardirq handler>
+ * read_lock(&A);
+ * <hardirq disable>
+ * spin_lock(&B);
+ * spin_lock(&B);
+ * read_lock(&A);
+ *
+ * is a deadlock.
+ */
+static void queued_read_lock_hardirq_RE_Er(void)
+{
+ HARDIRQ_ENTER();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <in hardirq handler>
+ * spin_lock(&B);
+ * <hardirq disable>
+ * read_lock(&A);
+ * read_lock(&A);
+ * spin_lock(&B);
+ *
+ * is not a deadlock.
+ */
+static void queued_read_lock_hardirq_ER_rE(void)
+{
+ HARDIRQ_ENTER();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <hardirq disable>
+ * spin_lock(&B);
+ * read_lock(&A);
+ * <in hardirq handler>
+ * spin_lock(&B);
+ * read_lock(&A);
+ *
+ * is a deadlock, because the two read_lock()s are both non-recursive readers.
+ */
+static void queued_read_lock_hardirq_inversion(void)
+{
+
+ HARDIRQ_ENTER();
+ LOCK(B);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+}
+
+static void queued_read_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | queued read lock tests |\n");
+ printk(" ---------------------------\n");
+ print_testname("hardirq read-lock/lock-read");
+ dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq lock-read/read-lock");
+ dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq inversion");
+ dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+}
+
+static void fs_reclaim_correct_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_NOFS);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_wrong_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_protected_nesting(void)
+{
+ unsigned int flags;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_nofs_save();
+ might_alloc(GFP_KERNEL);
+ memalloc_nofs_restore(flags);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_tests(void)
+{
+ printk(" --------------------\n");
+ printk(" | fs_reclaim tests |\n");
+ printk(" --------------------\n");
+
+ print_testname("correct nesting");
+ dotest(fs_reclaim_correct_nesting, SUCCESS, 0);
+ pr_cont("\n");
+
+ print_testname("wrong nesting");
+ dotest(fs_reclaim_wrong_nesting, FAILURE, 0);
+ pr_cont("\n");
+
+ print_testname("protected nesting");
+ dotest(fs_reclaim_protected_nesting, SUCCESS, 0);
+ pr_cont("\n");
+}
+
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+ do {
+ local_irq_disable();
+ __irq_enter();
+ WARN_ON(!in_irq());
+ } while(0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards, should go away when RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
+
+
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
+ \
+static void __maybe_unused inner##_in_##outer(void) \
+{ \
+ /* Relies on the reversed clean-up ordering: inner first */ \
+ guard(outer)(outer_lock); \
+ guard(inner)(inner_lock); \
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
+ * outer \ | | | |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ| o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SPIN | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * MUTEX | o | o | o | o
+ * ---------------+-------+----------+------+-------
+ */
+
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+/* the outer context doesn't allow any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+static void wait_context_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | wait context tests |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | rcu | raw | spin |mutex |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("in hardirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in hardirq context (not threaded)");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in softirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+ pr_cont("\n");
+
+ print_testname("in RCU context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+ pr_cont("\n");
+
+ print_testname("in RCU-bh context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+ pr_cont("\n");
+
+ print_testname("in RCU-sched context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+ pr_cont("\n");
+
+ print_testname("in RAW_SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
+ pr_cont("\n");
+
+ print_testname("in SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
+ pr_cont("\n");
+
+ print_testname("in MUTEX context");
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
+ pr_cont("\n");
+}
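A sketch of the kind of nesting the wait-context table above rejects under CONFIG_PROVE_RAW_LOCK_NESTING. The function is illustrative only and simply reuses the raw_lock_A and lock_B selftest locks: on PREEMPT_RT a spinlock_t can sleep, so acquiring one inside a raw_spinlock_t is flagged.

static void bad_wait_context_sketch(void)
{
	raw_spin_lock(&raw_lock_A);
	spin_lock(&lock_B);	/* invalid wait context: sleeping lock under a raw spinlock */
	spin_unlock(&lock_B);
	raw_spin_unlock(&raw_lock_A);
}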
+
+static void local_lock_2(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3A(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3B(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+}
+
+static void local_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | local_lock tests |\n");
+ printk(" ---------------------\n");
+
+ print_testname("local_lock inversion 2");
+ dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3A");
+ dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3B");
+ dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+ pr_cont("\n");
+}
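For context, the usual local_lock pattern that these tests model, as a sketch; the per-CPU structure and accessor below are hypothetical.

struct my_pcpu {
	local_lock_t lock;
	unsigned long counter;
};

static DEFINE_PER_CPU(struct my_pcpu, my_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_pcpu_inc(void)
{
	local_lock(&my_pcpu.lock);	/* disables preemption (a per-CPU sleeping lock on RT) */
	this_cpu_ptr(&my_pcpu)->counter++;
	local_unlock(&my_pcpu.lock);
}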
+
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+ /* mutex_A is hardirq-unsafe and softirq-unsafe */
+ /* mutex_A -> lock_C */
+ mutex_lock(&mutex_A);
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+ mutex_unlock(&mutex_A);
+
+ /* lock_A is hardirq-safe */
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT();
+
+ /* lock_A -> lock_B */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B);
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ /* lock_B -> lock_C */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_B);
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ spin_unlock(&lock_B);
+ HARDIRQ_ENABLE();
+
+ /* lock_D is softirq-safe */
+ SOFTIRQ_ENTER();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_EXIT();
+
+ /* And lock_D is hardirq-unsafe */
+ SOFTIRQ_DISABLE();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_ENABLE();
+
+ /*
+ * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+ * deadlock.
+ *
+ * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+ * hardirq-unsafe, deadlock.
+ */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+}
+
void locking_selftest(void)
{
/*
@@ -1979,16 +2802,20 @@ void locking_selftest(void)
}
/*
+ * treats read_lock() as recursive read locks for testing purpose
+ */
+ force_read_lock_recursive = 1;
+
+ /*
* Run the testsuite:
*/
printk("------------------------\n");
printk("| Locking API testsuite:\n");
printk("----------------------------------------------------------------------------\n");
- printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
+ printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
printk(" --------------------------------------------------------------------------\n");
init_shared_classes();
- debug_locks_silent = !debug_locks_verbose;
lockdep_set_selftest_task(current);
DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2033,14 +2860,6 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
-#ifdef CONFIG_PROVE_LOCKING
- /*
- * Lockdep does indeed fail here, but there's nothing we can do about
- * that now. Don't kill lockdep for it.
- */
- unexpected_testcase_failures--;
-#endif
-
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
@@ -2056,23 +2875,51 @@ void locking_selftest(void)
pr_cont(" |");
dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
- printk(" --------------------------------------------------------------------------\n");
+ print_testname("chain cached mixed R-L/L-W ABBA");
+ pr_cont(" |");
+ dotest(rlock_chaincache_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+
+ DO_TESTCASE_6x1RRB("rlock W1R2/W2R3/W3R1", W1R2_W2R3_W3R1);
+ DO_TESTCASE_6x1RRB("rlock W1W2/R2R3/W3R1", W1W2_R2R3_W3R1);
+ DO_TESTCASE_6x1RR("rlock W1W2/R2R3/R3W1", W1W2_R2R3_R3W1);
+ DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
+ printk(" --------------------------------------------------------------------------\n");
/*
* irq-context testcases:
*/
DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
- DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+ NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
- DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
-// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion", irq_read_recursion);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
ww_tests();
+ force_read_lock_recursive = 0;
+ /*
+ * queued_read_lock() specific test cases can be put here
+ */
+ if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
+ queued_read_lock_tests();
+
+ fs_reclaim_tests();
+
+ /* Wait context test cases that are specific for RAW_LOCK_NESTING */
+ if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+ wait_context_tests();
+
+ local_lock_tests();
+
+ print_testname("hardirq_unsafe_softirq_safe");
+ dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+ pr_cont("\n");
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/lockref.c b/lib/lockref.c
index 5b34bbd3eba8..2afe4c5d8919 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -14,17 +14,15 @@
BUILD_BUG_ON(sizeof(old) != 8); \
old.lock_count = READ_ONCE(lockref->lock_count); \
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
- struct lockref new = old, prev = old; \
+ struct lockref new = old; \
CODE \
- old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
- old.lock_count, \
- new.lock_count); \
- if (likely(old.lock_count == prev.lock_count)) { \
+ if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \
+ &old.lock_count, \
+ new.lock_count))) { \
SUCCESS; \
} \
if (!--retry) \
break; \
- cpu_relax(); \
} \
} while (0)
@@ -112,31 +110,6 @@ int lockref_put_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_put_not_zero);
/**
- * lockref_get_or_lock - Increments count unless the count is 0 or dead
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-int lockref_get_or_lock(struct lockref *lockref)
-{
- CMPXCHG_LOOP(
- new.count++;
- if (old.count <= 0)
- break;
- ,
- return 1;
- );
-
- spin_lock(&lockref->lock);
- if (lockref->count <= 0)
- return 0;
- lockref->count++;
- spin_unlock(&lockref->lock);
- return 1;
-}
-EXPORT_SYMBOL(lockref_get_or_lock);
-
-/**
* lockref_put_return - Decrement reference count if possible
* @lockref: pointer to lockref structure
*
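The lockref conversion above follows the generic try_cmpxchg() loop idiom; a standalone sketch of the same shape, with a hypothetical helper:

static int increment_once(int *ptr)
{
	int old = READ_ONCE(*ptr), new;

	do {
		new = old + 1;	/* any pure function of old */
		/* on failure, try_cmpxchg() updates old with the current value */
	} while (!try_cmpxchg(ptr, &old, new));

	return new;
}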
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
new file mode 100644
index 000000000000..b247d412ddef
--- /dev/null
+++ b/lib/logic_iomem.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/logic_iomem.h>
+#include <asm/io.h>
+
+struct logic_iomem_region {
+ const struct resource *res;
+ const struct logic_iomem_region_ops *ops;
+ struct list_head list;
+};
+
+struct logic_iomem_area {
+ const struct logic_iomem_ops *ops;
+ void *priv;
+};
+
+#define AREA_SHIFT 24
+#define MAX_AREA_SIZE (1 << AREA_SHIFT)
+#define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE)
+#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
+#define AREA_MASK (MAX_AREA_SIZE - 1)
+#ifdef CONFIG_64BIT
+#define IOREMAP_BIAS 0xDEAD000000000000UL
+#define IOREMAP_MASK 0xFFFFFFFF00000000UL
+#else
+#define IOREMAP_BIAS 0x80000000UL
+#define IOREMAP_MASK 0x80000000UL
+#endif
+
+static DEFINE_MUTEX(regions_mtx);
+static LIST_HEAD(regions_list);
+static struct logic_iomem_area mapped_areas[MAX_AREAS];
+
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops)
+{
+ struct logic_iomem_region *rreg;
+ int err;
+
+ if (WARN_ON(!resource || !ops))
+ return -EINVAL;
+
+ if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
+ return -EINVAL;
+
+ rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ if (!rreg)
+ return -ENOMEM;
+
+ err = request_resource(&iomem_resource, resource);
+ if (err) {
+ kfree(rreg);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&regions_mtx);
+ rreg->res = resource;
+ rreg->ops = ops;
+ list_add_tail(&rreg->list, &regions_list);
+ mutex_unlock(&regions_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(logic_iomem_add_region);
+
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+{
+ WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+ (unsigned long long)offset, size);
+ return NULL;
+}
+
+static void real_iounmap(volatile void __iomem *addr)
+{
+ WARN(1, "invalid iounmap for addr 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ void __iomem *ret = NULL;
+ struct logic_iomem_region *rreg, *found = NULL;
+ int i;
+
+ mutex_lock(&regions_mtx);
+ list_for_each_entry(rreg, &regions_list, list) {
+ if (rreg->res->start > offset)
+ continue;
+ if (rreg->res->end < offset + size - 1)
+ continue;
+ found = rreg;
+ break;
+ }
+
+ if (!found)
+ goto out;
+
+ for (i = 0; i < MAX_AREAS; i++) {
+ long offs;
+
+ if (mapped_areas[i].ops)
+ continue;
+
+ offs = rreg->ops->map(offset - found->res->start,
+ size, &mapped_areas[i].ops,
+ &mapped_areas[i].priv);
+ if (offs < 0) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ if (WARN_ON(!mapped_areas[i].ops)) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
+ break;
+ }
+out:
+ mutex_unlock(&regions_mtx);
+ if (ret)
+ return ret;
+ return real_ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+static inline struct logic_iomem_area *
+get_area(const volatile void __iomem *addr)
+{
+ unsigned long a = (unsigned long)addr;
+ unsigned int idx;
+
+ if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
+ return NULL;
+
+ idx = (a & AREA_BITS) >> AREA_SHIFT;
+
+ if (mapped_areas[idx].ops)
+ return &mapped_areas[idx];
+
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct logic_iomem_area *area = get_area(addr);
+
+ if (!area) {
+ real_iounmap(addr);
+ return;
+ }
+
+ if (area->ops->unmap)
+ area->ops->unmap(area->priv);
+
+ mutex_lock(&regions_mtx);
+ area->ops = NULL;
+ area->priv = NULL;
+ mutex_unlock(&regions_mtx);
+}
+EXPORT_SYMBOL(iounmap);
+
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+#define MAKE_FALLBACK(op, sz) \
+static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid read" #op " at address %llx\n", \
+ (unsigned long long)(uintptr_t __force)addr); \
+ return (u ## sz)~0ULL; \
+} \
+ \
+static void real_raw_write ## op(u ## sz val, \
+ volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
+ (unsigned long long)val, \
+ (unsigned long long)(uintptr_t __force)addr);\
+} \
+
+MAKE_FALLBACK(b, 8);
+MAKE_FALLBACK(w, 16);
+MAKE_FALLBACK(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_FALLBACK(q, 64);
+#endif
+
+static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ WARN(1, "Invalid memset_io at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+
+static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+
+ memset(buffer, 0xff, size);
+}
+
+static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define MAKE_OP(op, sz) \
+u##sz __raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) \
+ return real_raw_read ## op(addr); \
+ \
+ return (u ## sz) area->ops->read(area->priv, \
+ (unsigned long)addr & AREA_MASK,\
+ sz / 8); \
+} \
+EXPORT_SYMBOL(__raw_read ## op); \
+ \
+void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) { \
+ real_raw_write ## op(val, addr); \
+ return; \
+ } \
+ \
+ area->ops->write(area->priv, \
+ (unsigned long)addr & AREA_MASK, \
+ sz / 8, val); \
+} \
+EXPORT_SYMBOL(__raw_write ## op)
+
+MAKE_OP(b, 8);
+MAKE_OP(w, 16);
+MAKE_OP(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_OP(q, 64);
+#endif
+
+void memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memset_io(addr, value, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->set) {
+ area->ops->set(area->priv, start, value, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, value);
+}
+EXPORT_SYMBOL(memset_io);
+
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_fromio(buffer, addr, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_from) {
+ area->ops->copy_from(area->priv, buffer, start, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ buf[offs] = area->ops->read(area->priv, start + offs, 1);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ const u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_toio(addr, buffer, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_to) {
+ area->ops->copy_to(area->priv, start, buffer, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, buf[offs]);
+}
+EXPORT_SYMBOL(memcpy_toio);
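A registration sketch for the API above. Everything here is hypothetical; the callback signatures are inferred from how the accessors above invoke them.

static unsigned long my_read(void *priv, unsigned long addr, int size)
{
	return 0;	/* a device model would return register contents */
}

static void my_write(void *priv, unsigned long addr, int size,
		     unsigned long val)
{
	/* a device model would latch the register write */
}

static const struct logic_iomem_ops my_mmio_ops = {
	.read	= my_read,
	.write	= my_write,
};

static long my_map(unsigned long offset, size_t size,
		   const struct logic_iomem_ops **ops, void **priv)
{
	*ops = &my_mmio_ops;
	*priv = NULL;
	return 0;	/* offset of this mapping within the new area */
}

static const struct logic_iomem_region_ops my_region_ops = {
	.map = my_map,
};

static struct resource my_res = DEFINE_RES_MEM(0x10000000, 0x10000);

static int __init my_driver_init(void)
{
	return logic_iomem_add_region(&my_res, &my_region_ops);
}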
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 905027574e5d..2ea564a40064 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
* Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
* Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: John Garry <john.garry@huawei.com>
*/
#define pr_fmt(fmt) "LOGIC PIO: " fmt
@@ -19,14 +20,13 @@
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);
-/* Consider a kernel general helper for this */
-#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
-
/**
* logic_pio_register_range - register logical PIO range for a host
* @new_range: pointer to the IO range to be registered.
*
* Returns 0 on success, the error code in case of failure.
+ * If the range already exists, -EEXIST will be returned, which should be
+ * considered a success.
*
* Register a new IO range node in the IO range list.
*/
@@ -39,7 +39,8 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
resource_size_t iio_sz = MMIO_UPPER_LIMIT;
int ret = 0;
- if (!new_range || !new_range->fwnode || !new_range->size)
+ if (!new_range || !new_range->fwnode || !new_range->size ||
+ (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
return -EINVAL;
start = new_range->hw_start;
@@ -49,6 +50,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
+ ret = -EEXIST;
goto end_register;
}
if (range->flags == LOGIC_PIO_CPU_MMIO &&
@@ -227,17 +229,17 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
}
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
-#define BUILD_LOGIC_IO(bw, type) \
-type logic_in##bw(unsigned long addr) \
+#define BUILD_LOGIC_IO(bwl, type) \
+type logic_in##bwl(unsigned long addr) \
{ \
type ret = (type)~0; \
\
if (addr < MMIO_UPPER_LIMIT) { \
- ret = read##bw(PCI_IOBASE + addr); \
+ ret = _in##bwl(addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
ret = entry->ops->in(entry->hostdata, \
addr, sizeof(type)); \
else \
@@ -246,14 +248,14 @@ type logic_in##bw(unsigned long addr) \
return ret; \
} \
\
-void logic_out##bw(type value, unsigned long addr) \
+void logic_out##bwl(type value, unsigned long addr) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- write##bw(value, PCI_IOBASE + addr); \
+ _out##bwl(value, addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->out(entry->hostdata, \
addr, value, sizeof(type)); \
else \
@@ -261,15 +263,15 @@ void logic_out##bw(type value, unsigned long addr) \
} \
} \
\
-void logic_ins##bw(unsigned long addr, void *buffer, \
- unsigned int count) \
+void logic_ins##bwl(unsigned long addr, void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- reads##bw(PCI_IOBASE + addr, buffer, count); \
+ reads##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->ins(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
@@ -278,15 +280,15 @@ void logic_ins##bw(unsigned long addr, void *buffer, \
\
} \
\
-void logic_outs##bw(unsigned long addr, const void *buffer, \
- unsigned int count) \
+void logic_outs##bwl(unsigned long addr, const void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- writes##bw(PCI_IOBASE + addr, buffer, count); \
+ writes##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->outs(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
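Caller-side sketch for the new -EEXIST convention; the wrapper below is a hypothetical host-controller caller.

static int my_host_register_io(struct logic_pio_hwaddr *range)
{
	int ret = logic_pio_register_range(range);

	/* A range that is already registered is not an error for the caller. */
	return ret == -EEXIST ? 0 : ret;
}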
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index c69ee53d8dde..b3d9187611de 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -60,22 +60,12 @@ int lc_try_lock(struct lru_cache *lc)
} while (unlikely (val == LC_PARANOIA));
/* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
return 0 == val;
-#if 0
- /* Alternative approach, spin in case someone enters or leaves a
- * PARANOIA_ENTRY()/RETURN() section. */
- unsigned long old, new, val;
- do {
- old = lc->flags & LC_PARANOIA;
- new = old | LC_LOCKED;
- val = cmpxchg(&lc->flags, old, new);
- } while (unlikely (val == (old ^ LC_PARANOIA)));
- return old == val;
-#endif
}
/**
* lc_create - prepares to track objects in an active set
* @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
* @max_pending_changes: maximum changes to accumulate until a transaction is required
* @e_count: number of elements allowed to be active simultaneously
* @e_size: size of the tracked objects
@@ -146,8 +136,8 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
return lc;
/* else: could not allocate all elements, give up */
- for (i--; i; i--) {
- void *p = element[i];
+ while (i) {
+ void *p = element[--i];
kmem_cache_free(cache, p - e_off);
}
kfree(lc);
@@ -363,7 +353,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig
struct lc_element *e;
PARANOIA_ENTRY();
- if (lc->flags & LC_STARVING) {
+ if (test_bit(__LC_STARVING, &lc->flags)) {
++lc->starving;
RETURN(NULL);
}
@@ -416,7 +406,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig
* the LRU element, we have to wait ...
*/
if (!lc_unused_element_available(lc)) {
- __set_bit(__LC_STARVING, &lc->flags);
+ set_bit(__LC_STARVING, &lc->flags);
RETURN(NULL);
}
@@ -585,49 +575,7 @@ struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
}
/**
- * lc_index_of
- * @lc: the lru cache to operate on
- * @e: the element to query for its index position in lc->element
- */
-unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
-{
- PARANOIA_LC_ELEMENT(lc, e);
- return e->lc_index;
-}
-
-/**
- * lc_set - associate index with label
- * @lc: the lru cache to operate on
- * @enr: the label to set
- * @index: the element index to associate label with.
- *
- * Used to initialize the active set to some previously recorded state.
- */
-void lc_set(struct lru_cache *lc, unsigned int enr, int index)
-{
- struct lc_element *e;
- struct list_head *lh;
-
- if (index < 0 || index >= lc->nr_elements)
- return;
-
- e = lc_element_by_index(lc, index);
- BUG_ON(e->lc_number != e->lc_new_number);
- BUG_ON(e->refcnt != 0);
-
- e->lc_number = e->lc_new_number = enr;
- hlist_del_init(&e->colision);
- if (enr == LC_FREE)
- lh = &lc->free;
- else {
- hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
- lh = &lc->lru;
- }
- list_move(&e->list, lh);
-}
-
-/**
- * lc_dump - Dump a complete LRU cache to seq in textual form.
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
* @lc: the lru cache to operate on
* @seq: the &struct seq_file pointer to seq_printf into
* @utext: user supplied additional "heading" or other info
@@ -660,7 +608,6 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
-EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
@@ -668,7 +615,6 @@ EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_committed);
EXPORT_SYMBOL(lc_element_by_index);
-EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
EXPORT_SYMBOL(lc_try_lock);
diff --git a/lib/lwq.c b/lib/lwq.c
new file mode 100644
index 000000000000..57d080a4d53d
--- /dev/null
+++ b/lib/lwq.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Light-weight single-linked queue.
+ *
+ * Entries are enqueued to the head of an llist, with no blocking.
+ * This can happen in any context.
+ *
+ * Entries are dequeued using a spinlock to protect against multiple
+ * access. Dequeued entries are taken from a staging list held in reverse
+ * order; the staging list is refreshed from the llist when it is exhausted.
+ *
+ * This is particularly suitable when work items are queued in BH or
+ * IRQ context, and where work items are handled one at a time by
+ * dedicated threads.
+ */
+#include <linux/rcupdate.h>
+#include <linux/lwq.h>
+
+struct llist_node *__lwq_dequeue(struct lwq *q)
+{
+ struct llist_node *this;
+
+ if (lwq_empty(q))
+ return NULL;
+ spin_lock(&q->lock);
+ this = q->ready;
+ if (!this && !llist_empty(&q->new)) {
+ /* ensure queue doesn't appear transiently lwq_empty */
+ smp_store_release(&q->ready, (void *)1);
+ this = llist_reverse_order(llist_del_all(&q->new));
+ if (!this)
+ q->ready = NULL;
+ }
+ if (this)
+ q->ready = llist_next(this);
+ spin_unlock(&q->lock);
+ return this;
+}
+EXPORT_SYMBOL_GPL(__lwq_dequeue);
+
+/**
+ * lwq_dequeue_all - dequeue all currently enqueued objects
+ * @q: the queue to dequeue from
+ *
+ * Remove and return a linked list of llist_nodes of all the objects that were
+ * in the queue. The first on the list will be the object that was least
+ * recently enqueued.
+ */
+struct llist_node *lwq_dequeue_all(struct lwq *q)
+{
+ struct llist_node *r, *t, **ep;
+
+ if (lwq_empty(q))
+ return NULL;
+
+ spin_lock(&q->lock);
+ r = q->ready;
+ q->ready = NULL;
+ t = llist_del_all(&q->new);
+ spin_unlock(&q->lock);
+ ep = &r;
+ while (*ep)
+ ep = &(*ep)->next;
+ *ep = llist_reverse_order(t);
+ return r;
+}
+EXPORT_SYMBOL_GPL(lwq_dequeue_all);
+
+#if IS_ENABLED(CONFIG_LWQ_TEST)
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait_bit.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+struct tnode {
+ struct lwq_node n;
+ int i;
+ int c;
+};
+
+static int lwq_exercise(void *qv)
+{
+ struct lwq *q = qv;
+ int cnt;
+ struct tnode *t;
+
+ for (cnt = 0; cnt < 10000; cnt++) {
+ wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
+ t->c++;
+ if (lwq_enqueue(&t->n, q))
+ wake_up_var(q);
+ }
+ while (!kthread_should_stop())
+ schedule_timeout_idle(1);
+ return 0;
+}
+
+static int lwq_test(void)
+{
+ int i;
+ struct lwq q;
+ struct llist_node *l, **t1, *t2;
+ struct tnode *t;
+ struct task_struct *threads[8];
+
+ printk(KERN_INFO "testing lwq....\n");
+ lwq_init(&q);
+ printk(KERN_INFO " lwq: run some threads\n");
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
+ for (i = 0; i < 100; i++) {
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ break;
+ t->i = i;
+ t->c = 0;
+ if (lwq_enqueue(&t->n, &q))
+ wake_up_var(&q);
+ }
+ /* wait for threads to exit */
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ if (!IS_ERR_OR_NULL(threads[i]))
+ kthread_stop(threads[i]);
+ printk(KERN_INFO " lwq: dequeue first 50:");
+ for (i = 0; i < 50 ; i++) {
+ if (i && (i % 10) == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_INFO " lwq: ... ");
+ }
+ t = lwq_dequeue(&q, struct tnode, n);
+ if (t)
+ printk(KERN_CONT " %d(%d)", t->i, t->c);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ l = lwq_dequeue_all(&q);
+ printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
+ lwq_for_each_safe(t, t1, t2, &l, n) {
+ if ((t->i % 3) == 0) {
+ t->i = -1;
+ kfree(t);
+ t = NULL;
+ }
+ }
+ if (l)
+ lwq_enqueue_batch(l, &q);
+ printk(KERN_INFO " lwq: dequeue remaining:");
+ while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
+ printk(KERN_CONT " %d", t->i);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ return 0;
+}
+
+module_init(lwq_test);
+#endif /* CONFIG_LWQ_TEST */
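The design described in the lwq.c header comment above (lock-free enqueue onto an llist, spinlocked dequeue from a reversed staging list) reduces to a small producer/consumer pattern. Below is a minimal usage sketch, not part of the patch, built only from the API exercised by the CONFIG_LWQ_TEST code above (lwq_init(), lwq_enqueue(), lwq_dequeue(), wait_var_event()/wake_up_var()); the struct and function names are illustrative assumptions.

#include <linux/lwq.h>
#include <linux/slab.h>
#include <linux/wait_bit.h>
#include <linux/kthread.h>

/* Illustrative work item: only the embedded lwq_node is required by the API. */
struct work_item {
	struct lwq_node node;
	int payload;
};

static struct lwq work_queue;		/* lwq_init(&work_queue) during setup */

/* Producer side: may run in BH or IRQ context and never blocks. */
static void submit_work_item(struct work_item *w)
{
	/*
	 * As in the test code above, a true return from lwq_enqueue() is
	 * taken to mean the queue was empty and a sleeping consumer needs
	 * waking.
	 */
	if (lwq_enqueue(&w->node, &work_queue))
		wake_up_var(&work_queue);
}

/* Consumer side: one dedicated thread handles items one at a time. */
static int work_thread(void *unused)
{
	struct work_item *w;

	while (!kthread_should_stop()) {
		wait_var_event(&work_queue,
			       (w = lwq_dequeue(&work_queue, struct work_item, node)) != NULL);
		/* ... process w->payload ... */
		kfree(w);
	}
	return 0;
}

The wake_up_var()/wait_var_event() pairing mirrors lwq_exercise() in the test module; any other wakeup mechanism would work equally well, since lwq itself only reports the empty-to-non-empty transition.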
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index cc7b6d4cc7c7..90bb67994688 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -446,7 +446,7 @@ _last_literals:
*op++ = (BYTE)(lastRun << ML_BITS);
}
- memcpy(op, anchor, lastRun);
+ LZ4_memcpy(op, anchor, lastRun);
op += lastRun;
}
@@ -708,7 +708,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRunSize<<ML_BITS);
}
- memcpy(op, anchor, lastRunSize);
+ LZ4_memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0c9d3ad17e0f..59fe69a63800 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -141,6 +141,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
* space in the output for those 18 bytes earlier, upon
* entering the shortcut (in other words, there is a
* combined check for both stages).
+ *
+ * The & in the likely() below is intentionally not && so that
+ * some compilers can produce better parallelized runtime code
*/
if ((endOnInput ? length != RUN_MASK : length <= 8)
/*
@@ -150,7 +153,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
&& likely((endOnInput ? ip < shortiend : 1) &
(op <= shortoend))) {
/* Copy the literals */
- memcpy(op, ip, endOnInput ? 16 : 8);
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
/*
@@ -169,9 +172,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
(offset >= 8) &&
(dict == withPrefix64k || match >= lowPrefix)) {
/* Copy the match. */
- memcpy(op + 0, match + 0, 8);
- memcpy(op + 8, match + 8, 8);
- memcpy(op + 16, match + 16, 2);
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op + 16, match + 16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
@@ -260,12 +263,20 @@ static FORCE_INLINE int LZ4_decompress_generic(
}
}
- memcpy(op, ip, length);
+ /*
+ * supports overlapping memory regions; only matters
+ * for in-place decompression scenarios
+ */
+ LZ4_memmove(op, ip, length);
ip += length;
op += length;
- /* Necessarily EOF, due to parsing restrictions */
- if (!partialDecoding || (cpy == oend))
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for the following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
break;
} else {
/* may overwrite up to WILDCOPYLENGTH beyond cpy */
@@ -347,7 +358,7 @@ _copy_match:
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
@@ -357,7 +368,7 @@ _copy_match:
while (op < endOfMatch)
*op++ = *copyFrom++;
} else {
- memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
}
}
@@ -383,7 +394,7 @@ _copy_match:
while (op < copyEnd)
*op++ = *match++;
} else {
- memcpy(op, match, mlen);
+ LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend)
@@ -397,7 +408,7 @@ _copy_match:
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
- memcpy(op + 4, match, 4);
+ LZ4_memcpy(op + 4, match, 4);
match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
@@ -474,7 +485,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
-int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
@@ -496,9 +507,9 @@ static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
(BYTE *)dest - prefixSize, NULL, 0);
}
-int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const void *dictStart, size_t dictSize)
+static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 1a7fa9d9170f..330aa539b46e 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -36,6 +36,8 @@
*/
#include <asm/unaligned.h>
+
+#include <linux/bitops.h>
#include <linux/string.h> /* memset, memcpy */
#define FORCE_INLINE __always_inline
@@ -137,6 +139,17 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
return put_unaligned_le16(value, memPtr);
}
+/*
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so apply its specialized memcpy() inlining logic. When
+ * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
+ * as-if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
#if LZ4_ARCH64
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index 176f03b83e56..e7ac8694b797 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -570,7 +570,7 @@ _Search3:
*op++ = (BYTE) lastRun;
} else
*op++ = (BYTE)(lastRun<<ML_BITS);
- memcpy(op, anchor, iend - anchor);
+ LZ4_memcpy(op, anchor, iend - anchor);
op += iend - anchor;
}
@@ -663,7 +663,6 @@ static void LZ4HC_setExternalDict(
/* match referencing will resume from there */
ctxPtr->nextToUpdate = ctxPtr->dictLimit;
}
-EXPORT_SYMBOL(LZ4HC_setExternalDict);
static int LZ4_compressHC_continue_generic(
LZ4_streamHC_t *LZ4_streamHCPtr,
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index ba16c08e8cb9..9d31e7126606 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -50,9 +50,7 @@ next:
if (dv == 0 && bitstream_version) {
const unsigned char *ir = ip + 4;
- const unsigned char *limit = ip_end
- < (ip + MAX_ZERO_RUN_LENGTH + 1)
- ? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+ const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
defined(LZO_FAST_64BIT_MEMORY_ACCESS)
u64 dv64;
@@ -83,17 +81,19 @@ next:
ALIGN((uintptr_t)ir, 4)) &&
(ir < limit) && (*ir == 0))
ir++;
- for (; (ir + 4) <= limit; ir += 4) {
- dv = *((u32 *)ir);
- if (dv) {
+ if (IS_ALIGNED((uintptr_t)ir, 4)) {
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
# if defined(__LITTLE_ENDIAN)
- ir += __builtin_ctz(dv) >> 3;
+ ir += __builtin_ctz(dv) >> 3;
# elif defined(__BIG_ENDIAN)
- ir += __builtin_clz(dv) >> 3;
+ ir += __builtin_clz(dv) >> 3;
# else
# error "missing endian definition"
# endif
- break;
+ break;
+ }
}
}
#endif
@@ -266,6 +266,19 @@ m_len_done:
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
else {
+ if (unlikely(((m_off & 0x403f) == 0x403f)
+ && (m_len >= 261)
+ && (m_len <= 264))
+ && likely(bitstream_version)) {
+ // Under lzo-rle, block copies
+ // for 261 <= length <= 264 and
+ // (distance & 0x80f3) == 0x80f3
+ // can result in ambiguous
+ // output. Adjust length
+ // to 260 to prevent ambiguity.
+ ip -= m_len - 260;
+ m_len = 260;
+ }
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
@@ -286,7 +299,7 @@ finished_writing_instruction:
return in_end - (ii - ti);
}
-int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
void *wrkmem, const unsigned char bitstream_version)
{
@@ -311,7 +324,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
data_start = op;
while (l > 20) {
- size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
+ size_t ll = min_t(size_t, l, m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
if ((ll_end + ((t + ll) >> 5)) <= ll_end)
break;
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 2717c7963acd..7892a40cf765 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -32,7 +32,7 @@
* depending on the base count. Since the base count is taken from a u8
* and a few bits, it is safe to assume that it will always be lower than
* or equal to 2*255, thus we can always prevent any overflow by accepting
- * two less 255 steps. See Documentation/lzo.txt for more information.
+ * two less 255 steps. See Documentation/staging/lzo.rst for more information.
*/
#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
new file mode 100644
index 000000000000..6f241bb38799
--- /dev/null
+++ b/lib/maple_tree.c
@@ -0,0 +1,7564 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Maple Tree implementation
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ * Copyright (c) 2023 ByteDance
+ * Author: Peng Zhang <zhangpeng.00@bytedance.com>
+ */
+
+/*
+ * DOC: Interesting implementation details of the Maple Tree
+ *
+ * Each node type has a number of slots for entries and a number of slots for
+ * pivots. In the case of dense nodes, the pivots are implied by the position
+ * and are simply the slot index + the minimum of the node.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ *
+ *
+ * The following illustrates the layout of a range64 node's slots and pivots.
+ *
+ *
+ * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
+ * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ │ │ │ │ └─ Implied maximum
+ * │ │ │ │ │ │ │ └─ Pivot 14
+ * │ │ │ │ │ │ └─ Pivot 13
+ * │ │ │ │ │ └─ Pivot 12
+ * │ │ │ │ └─ Pivot 11
+ * │ │ │ └─ Pivot 2
+ * │ │ └─ Pivot 1
+ * │ └─ Pivot 0
+ * └─ Implied minimum
+ *
+ * Slot contents:
+ * Internal (non-leaf) nodes contain pointers to other nodes.
+ * Leaf nodes contain entries.
+ *
+ * The location of interest is often referred to as an offset. All offsets have
+ * a slot, but the last offset has an implied pivot from the node above (or
+ * UINT_MAX for the root node).
+ *
+ * Ranges complicate certain write activities. When modifying any of
+ * the B-tree variants, it is known that one entry will either be added or
+ * deleted. When modifying the Maple Tree, one store operation may overwrite
+ * the entire data set, or one half of the tree, or the middle half of the tree.
+ *
+ */
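To make the pivot semantics above concrete (an editorial worked example, not from the source): a leaf node with an implied minimum of 0 and pivots {9, 19} stores the entry for the range [0, 9] in slot 0, the entry for [10, 19] in slot 1, and the entry for [20, implied maximum] in slot 2, because each pivot is inclusive of the slot with the same index and the last slot's upper bound is taken from the node above.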
+
+
+#include <linux/maple_tree.h>
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <asm/barrier.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/maple_tree.h>
+
+#define MA_ROOT_PARENT 1
+
+/*
+ * Maple state flags
+ * * MA_STATE_BULK - Bulk insert mode
+ * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
+ * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
+ */
+#define MA_STATE_BULK 1
+#define MA_STATE_REBALANCE 2
+#define MA_STATE_PREALLOC 4
+
+#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
+#define ma_mnode_ptr(x) ((struct maple_node *)(x))
+#define ma_enode_ptr(x) ((struct maple_enode *)(x))
+static struct kmem_cache *maple_node_cache;
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+static const unsigned long mt_max[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = ULONG_MAX,
+ [maple_range_64] = ULONG_MAX,
+ [maple_arange_64] = ULONG_MAX,
+};
+#define mt_node_max(x) mt_max[mte_node_type(x)]
+#endif
+
+static const unsigned char mt_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
+};
+#define mt_slot_count(x) mt_slots[mte_node_type(x)]
+
+static const unsigned char mt_pivots[] = {
+ [maple_dense] = 0,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
+};
+#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
+
+static const unsigned char mt_min_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS / 2,
+ [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+};
+#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
+
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
+#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
+
+struct maple_big_node {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
+ union {
+ struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
+ struct {
+ unsigned long padding[MAPLE_BIG_NODE_GAPS];
+ unsigned long gap[MAPLE_BIG_NODE_GAPS];
+ };
+ };
+ unsigned char b_end;
+ enum maple_type type;
+};
+
+/*
+ * The maple_subtree_state is used to build a tree to replace a segment of an
+ * existing tree in a more atomic way. Any walkers of the older tree will hit a
+ * dead node and restart on updates.
+ */
+struct maple_subtree_state {
+ struct ma_state *orig_l; /* Original left side of subtree */
+ struct ma_state *orig_r; /* Original right side of subtree */
+ struct ma_state *l; /* New left side of subtree */
+ struct ma_state *m; /* New middle of subtree (rare) */
+ struct ma_state *r; /* New right side of subtree */
+ struct ma_topiary *free; /* nodes to be freed */
+ struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
+ struct maple_big_node *bn;
+};
+
+#ifdef CONFIG_KASAN_STACK
+/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
+#define noinline_for_kasan noinline_for_stack
+#else
+#define noinline_for_kasan inline
+#endif
+
+/* Functions */
+static inline struct maple_node *mt_alloc_one(gfp_t gfp)
+{
+ return kmem_cache_alloc(maple_node_cache, gfp);
+}
+
+static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+{
+ return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
+}
+
+static inline void mt_free_one(struct maple_node *node)
+{
+ kmem_cache_free(maple_node_cache, node);
+}
+
+static inline void mt_free_bulk(size_t size, void __rcu **nodes)
+{
+ kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
+}
+
+static void mt_free_rcu(struct rcu_head *head)
+{
+ struct maple_node *node = container_of(head, struct maple_node, rcu);
+
+ kmem_cache_free(maple_node_cache, node);
+}
+
+/*
+ * ma_free_rcu() - Use rcu callback to free a maple node
+ * @node: The node to free
+ *
+ * The maple tree uses the parent pointer to indicate this node is no longer in
+ * use and will be freed.
+ */
+static void ma_free_rcu(struct maple_node *node)
+{
+ WARN_ON(node->parent != ma_parent_ptr(node));
+ call_rcu(&node->rcu, mt_free_rcu);
+}
+
+static void mas_set_height(struct ma_state *mas)
+{
+ unsigned int new_flags = mas->tree->ma_flags;
+
+ new_flags &= ~MT_FLAGS_HEIGHT_MASK;
+ MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
+ new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
+ mas->tree->ma_flags = new_flags;
+}
+
+static unsigned int mas_mt_height(struct ma_state *mas)
+{
+ return mt_height(mas->tree);
+}
+
+static inline unsigned int mt_attr(struct maple_tree *mt)
+{
+ return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
+}
+
+static __always_inline enum maple_type mte_node_type(
+ const struct maple_enode *entry)
+{
+ return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
+ MAPLE_NODE_TYPE_MASK;
+}
+
+static __always_inline bool ma_is_dense(const enum maple_type type)
+{
+ return type < maple_leaf_64;
+}
+
+static __always_inline bool ma_is_leaf(const enum maple_type type)
+{
+ return type < maple_range_64;
+}
+
+static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
+{
+ return ma_is_leaf(mte_node_type(entry));
+}
+
+/*
+ * We also reserve values with the bottom two bits set to '10' which are
+ * below 4096
+ */
+static __always_inline bool mt_is_reserved(const void *entry)
+{
+ return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
+ xa_is_internal(entry);
+}
+
+static __always_inline void mas_set_err(struct ma_state *mas, long err)
+{
+ mas->node = MA_ERROR(err);
+ mas->status = ma_error;
+}
+
+static __always_inline bool mas_is_ptr(const struct ma_state *mas)
+{
+ return mas->status == ma_root;
+}
+
+static __always_inline bool mas_is_start(const struct ma_state *mas)
+{
+ return mas->status == ma_start;
+}
+
+static __always_inline bool mas_is_none(const struct ma_state *mas)
+{
+ return mas->status == ma_none;
+}
+
+static __always_inline bool mas_is_paused(const struct ma_state *mas)
+{
+ return mas->status == ma_pause;
+}
+
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
+{
+ return mas->status == ma_overflow;
+}
+
+static inline bool mas_is_underflow(struct ma_state *mas)
+{
+ return mas->status == ma_underflow;
+}
+
+static __always_inline struct maple_node *mte_to_node(
+ const struct maple_enode *entry)
+{
+ return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
+ * @entry: The maple encoded node
+ *
+ * Return: a maple topiary pointer
+ */
+static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
+{
+ return (struct maple_topiary *)
+ ((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mas_mn() - Get the maple state node.
+ * @mas: The maple state
+ *
+ * Return: the maple node (not encoded - bare pointer).
+ */
+static inline struct maple_node *mas_mn(const struct ma_state *mas)
+{
+ return mte_to_node(mas->node);
+}
+
+/*
+ * mte_set_node_dead() - Set a maple encoded node as dead.
+ * @mn: The maple encoded node.
+ */
+static inline void mte_set_node_dead(struct maple_enode *mn)
+{
+ mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
+ smp_wmb(); /* Needed for RCU */
+}
+
+/* Bit 1 indicates the root is a node */
+#define MAPLE_ROOT_NODE 0x02
+/* maple_type stored bit 3-6 */
+#define MAPLE_ENODE_TYPE_SHIFT 0x03
+/* Bit 2 means a NULL somewhere below */
+#define MAPLE_ENODE_NULL 0x04
+
+static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
+ enum maple_type type)
+{
+ return (void *)((unsigned long)node |
+ (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
+}
+
+static inline void *mte_mk_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
+}
+
+static inline void *mte_safe_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
+}
+
+static inline void *mte_set_full(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
+}
+
+static inline void *mte_clear_full(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+}
+
+static inline bool mte_has_null(const struct maple_enode *node)
+{
+ return (unsigned long)node & MAPLE_ENODE_NULL;
+}
+
+static __always_inline bool ma_is_root(struct maple_node *node)
+{
+ return ((unsigned long)node->parent & MA_ROOT_PARENT);
+}
+
+static __always_inline bool mte_is_root(const struct maple_enode *node)
+{
+ return ma_is_root(mte_to_node(node));
+}
+
+static inline bool mas_is_root_limits(const struct ma_state *mas)
+{
+ return !mas->min && mas->max == ULONG_MAX;
+}
+
+static __always_inline bool mt_is_alloc(struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
+}
+
+/*
+ * The Parent Pointer
+ * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
+ * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
+ * bit values need an extra bit to store the offset. This extra bit comes from
+ * a reuse of the last bit in the node type. This is possible by using bit 1 to
+ * indicate if bit 2 is part of the type or the slot.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and alignment
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-7
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-7
+ */
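As a worked example of this encoding (editorial, derived from mas_set_parent() and mte_parent_slot() below): placing a child in slot 3 of a maple_range_64 parent stores the parent pointer with low bits (3 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 = 0x18 | 0x06 = 0x1e; decoding masks with MAPLE_PARENT_16B_SLOT_MASK (0x1e & 0xfc = 0x1c) and, because MAPLE_PARENT_NOT_RANGE16 is set, shifts right by MAPLE_PARENT_SLOT_SHIFT to recover slot 3.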
+
+#define MAPLE_PARENT_ROOT 0x01
+
+#define MAPLE_PARENT_SLOT_SHIFT 0x03
+#define MAPLE_PARENT_SLOT_MASK 0xF8
+
+#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
+#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
+
+#define MAPLE_PARENT_RANGE64 0x06
+#define MAPLE_PARENT_RANGE32 0x04
+#define MAPLE_PARENT_NOT_RANGE16 0x02
+
+/*
+ * mte_parent_shift() - Get the parent shift for the slot storage.
+ * @parent: The parent pointer cast as an unsigned long
+ * Return: The shift into that pointer to the start of the slot
+ */
+static inline unsigned long mte_parent_shift(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_SHIFT;
+
+ return MAPLE_PARENT_16B_SLOT_SHIFT;
+}
+
+/*
+ * mte_parent_slot_mask() - Get the slot mask for the parent.
+ * @parent: The parent pointer cast as an unsigned long.
+ * Return: The slot mask for that parent.
+ */
+static inline unsigned long mte_parent_slot_mask(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_MASK;
+
+ return MAPLE_PARENT_16B_SLOT_MASK;
+}
+
+/*
+ * mas_parent_type() - Return the maple_type of the parent from the stored
+ * parent type.
+ * @mas: The maple state
+ * @enode: The maple_enode to extract the parent's enum
+ * Return: The node->parent maple_type
+ */
+static inline
+enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
+{
+ unsigned long p_type;
+
+ p_type = (unsigned long)mte_to_node(enode)->parent;
+ if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
+ return 0;
+
+ p_type &= MAPLE_NODE_MASK;
+ p_type &= ~mte_parent_slot_mask(p_type);
+ switch (p_type) {
+ case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
+ if (mt_is_alloc(mas->tree))
+ return maple_arange_64;
+ return maple_range_64;
+ }
+
+ return 0;
+}
+
+/*
+ * mas_set_parent() - Set the parent node and encode the slot
+ * @mas: The maple state
+ * @enode: The encoded maple node.
+ * @parent: The encoded maple node that is the parent of @enode.
+ * @slot: The slot that @enode resides in @parent.
+ *
+ * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
+ * parent type.
+ */
+static inline
+void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
+ const struct maple_enode *parent, unsigned char slot)
+{
+ unsigned long val = (unsigned long)parent;
+ unsigned long shift;
+ unsigned long type;
+ enum maple_type p_type = mte_node_type(parent);
+
+ MAS_BUG_ON(mas, p_type == maple_dense);
+ MAS_BUG_ON(mas, p_type == maple_leaf_64);
+
+ switch (p_type) {
+ case maple_range_64:
+ case maple_arange_64:
+ shift = MAPLE_PARENT_SLOT_SHIFT;
+ type = MAPLE_PARENT_RANGE64;
+ break;
+ default:
+ case maple_dense:
+ case maple_leaf_64:
+ shift = type = 0;
+ break;
+ }
+
+ val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
+ val |= (slot << shift) | type;
+ mte_to_node(enode)->parent = ma_parent_ptr(val);
+}
+
+/*
+ * mte_parent_slot() - get the parent slot of @enode.
+ * @enode: The encoded maple node.
+ *
+ * Return: The slot in the parent node where @enode resides.
+ */
+static __always_inline
+unsigned int mte_parent_slot(const struct maple_enode *enode)
+{
+ unsigned long val = (unsigned long)mte_to_node(enode)->parent;
+
+ if (unlikely(val & MA_ROOT_PARENT))
+ return 0;
+
+ /*
+ * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
+ * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
+ */
+ return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
+}
+
+/*
+ * mte_parent() - Get the parent of @node.
+ * @node: The encoded maple node.
+ *
+ * Return: The parent maple node.
+ */
+static __always_inline
+struct maple_node *mte_parent(const struct maple_enode *enode)
+{
+ return (void *)((unsigned long)
+ (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * ma_dead_node() - check if the @node is dead.
+ * @node: The maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static __always_inline bool ma_dead_node(const struct maple_node *node)
+{
+ struct maple_node *parent;
+
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
+ parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
+ return (parent == node);
+}
+
+/*
+ * mte_dead_node() - check if the @enode is dead.
+ * @enode: The encoded maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static __always_inline bool mte_dead_node(const struct maple_enode *enode)
+{
+ struct maple_node *parent, *node;
+
+ node = mte_to_node(enode);
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
+ parent = mte_parent(enode);
+ return (parent == node);
+}
+
+/*
+ * mas_allocated() - Get the number of nodes allocated in a maple state.
+ * @mas: The maple state
+ *
+ * The ma_state alloc member is overloaded to hold a pointer to the first
+ * allocated node or to the number of requested nodes to allocate. If bit 0 is
+ * set, then the alloc contains the number of requested nodes. If there is an
+ * allocated node, then the total allocated nodes is in that node.
+ *
+ * Return: The total number of nodes allocated
+ */
+static inline unsigned long mas_allocated(const struct ma_state *mas)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
+ return 0;
+
+ return mas->alloc->total;
+}
+
+/*
+ * mas_set_alloc_req() - Set the requested number of allocations.
+ * @mas: the maple state
+ * @count: the number of allocations.
+ *
+ * The requested number of allocations is either in the first allocated node,
+ * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
+ * no allocated node. Set the request either in the node or do the necessary
+ * encoding to store in @mas->alloc directly.
+ */
+static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
+ if (!count)
+ mas->alloc = NULL;
+ else
+ mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
+ return;
+ }
+
+ mas->alloc->request_count = count;
+}
+
+/*
+ * mas_alloc_req() - get the requested number of allocations.
+ * @mas: The maple state
+ *
+ * The alloc count is either stored directly in @mas, or in
+ * @mas->alloc->request_count if there is at least one node allocated. Decode
+ * the request count if it's stored directly in @mas->alloc.
+ *
+ * Return: The allocation request count.
+ */
+static inline unsigned int mas_alloc_req(const struct ma_state *mas)
+{
+ if ((unsigned long)mas->alloc & 0x1)
+ return (unsigned long)(mas->alloc) >> 1;
+ else if (mas->alloc)
+ return mas->alloc->request_count;
+ return 0;
+}
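As an illustration of this encoding (editorial example): with nothing allocated, requesting 5 nodes stores (5 << 1) | 1 = 0xb directly in mas->alloc, so mas_allocated() still reports 0 while mas_alloc_req() decodes 5; once at least one node has been allocated, mas->alloc points at that node and both counts live in its total and request_count fields.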
+
+/*
+ * ma_pivots() - Get a pointer to the maple node pivots.
+ * @node - the maple node
+ * @type - the node type
+ *
+ * In the event of a dead node, this array may be %NULL
+ *
+ * Return: A pointer to the maple node pivots
+ */
+static inline unsigned long *ma_pivots(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.pivot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return node->mr64.pivot;
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * ma_gaps() - Get a pointer to the maple node gaps.
+ * @node - the maple node
+ * @type - the node type
+ *
+ * Return: A pointer to the maple node gaps
+ */
+static inline unsigned long *ma_gaps(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.gap;
+ case maple_range_64:
+ case maple_leaf_64:
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * mas_safe_pivot() - get the pivot at @piv or mas->max.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @piv: The pivot to fetch
+ * @type: The maple node type
+ *
+ * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
+ * otherwise.
+ */
+static __always_inline unsigned long
+mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
+ unsigned char piv, enum maple_type type)
+{
+ if (piv >= mt_pivots[type])
+ return mas->max;
+
+ return pivots[piv];
+}
+
+/*
+ * mas_safe_min() - Return the minimum for a given offset.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @offset: The offset into the pivot array
+ *
+ * Return: The minimum range value that is contained in @offset.
+ */
+static inline unsigned long
+mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
+{
+ if (likely(offset))
+ return pivots[offset - 1] + 1;
+
+ return mas->min;
+}
+
+/*
+ * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
+ * @mn: The encoded maple node
+ * @piv: The pivot offset
+ * @val: The value of the pivot
+ */
+static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
+ unsigned long val)
+{
+ struct maple_node *node = mte_to_node(mn);
+ enum maple_type type = mte_node_type(mn);
+
+ BUG_ON(piv >= mt_pivots[type]);
+ switch (type) {
+ case maple_range_64:
+ case maple_leaf_64:
+ node->mr64.pivot[piv] = val;
+ break;
+ case maple_arange_64:
+ node->ma64.pivot[piv] = val;
+ break;
+ case maple_dense:
+ break;
+ }
+
+}
+
+/*
+ * ma_slots() - Get a pointer to the maple node slots.
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * Return: A pointer to the maple node slots
+ */
+static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
+{
+ switch (mt) {
+ case maple_arange_64:
+ return mn->ma64.slot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return mn->mr64.slot;
+ case maple_dense:
+ return mn->slot;
+ }
+
+ return NULL;
+}
+
+static inline bool mt_write_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
+static __always_inline bool mt_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
+static __always_inline void *mt_slot(const struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
+{
+ return rcu_dereference_check(slots[offset], mt_locked(mt));
+}
+
+static __always_inline void *mt_slot_locked(struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
+{
+ return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
+}
+/*
+ * mas_slot_locked() - Get the slot value when holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset.
+ */
+static __always_inline void *mas_slot_locked(struct ma_state *mas,
+ void __rcu **slots, unsigned char offset)
+{
+ return mt_slot_locked(mas->tree, slots, offset);
+}
+
+/*
+ * mas_slot() - Get the slot value when not holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset
+ */
+static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
+{
+ return mt_slot(mas->tree, slots, offset);
+}
+
+/*
+ * mas_root() - Get the maple tree root.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static __always_inline void *mas_root(struct ma_state *mas)
+{
+ return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
+}
+
+static inline void *mt_root_locked(struct maple_tree *mt)
+{
+ return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
+}
+
+/*
+ * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static inline void *mas_root_locked(struct ma_state *mas)
+{
+ return mt_root_locked(mas->tree);
+}
+
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+ enum maple_type mt)
+{
+ switch (mt) {
+ case maple_arange_64:
+ return &mn->ma64.meta;
+ default:
+ return &mn->mr64.meta;
+ }
+}
+
+/*
+ * ma_set_meta() - Set the metadata information of a node.
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The offset of the highest sub-gap in this node.
+ * @end: The end of the data in this node.
+ */
+static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset, unsigned char end)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+ meta->end = end;
+}
+
+/*
+ * mt_clear_meta() - clear the metadata information of a node, if it exists
+ * @mt: The maple tree
+ * @mn: The maple node
+ * @type: The maple node type
+ */
+static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
+ enum maple_type type)
+{
+ struct maple_metadata *meta;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *next;
+
+ switch (type) {
+ case maple_range_64:
+ pivots = mn->mr64.pivot;
+ if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
+ slots = mn->mr64.slot;
+ next = mt_slot_locked(mt, slots,
+ MAPLE_RANGE64_SLOTS - 1);
+ if (unlikely((mte_to_node(next) &&
+ mte_node_type(next))))
+ return; /* no metadata, could be node */
+ }
+ fallthrough;
+ case maple_arange_64:
+ meta = ma_meta(mn, type);
+ break;
+ default:
+ return;
+ }
+
+ meta->gap = 0;
+ meta->end = 0;
+}
+
+/*
+ * ma_meta_end() - Get the data end of a node from the metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ */
+static inline unsigned char ma_meta_end(struct maple_node *mn,
+ enum maple_type mt)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ return meta->end;
+}
+
+/*
+ * ma_meta_gap() - Get the largest gap location of a node from the metadata
+ * @mn: The maple node
+ */
+static inline unsigned char ma_meta_gap(struct maple_node *mn)
+{
+ return mn->ma64.meta.gap;
+}
+
+/*
+ * ma_set_meta_gap() - Set the largest gap location in a node's metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The location of the largest gap.
+ */
+static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset)
+{
+
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+}
+
+/*
+ * mat_add() - Add @dead_enode to the ma_topiary list of dead nodes.
+ * @mat - the ma_topiary, a linked list of dead nodes.
+ * @dead_enode - the node to be marked as dead and added to the tail of the list
+ *
+ * Add the @dead_enode to the linked list in @mat.
+ */
+static inline void mat_add(struct ma_topiary *mat,
+ struct maple_enode *dead_enode)
+{
+ mte_set_node_dead(dead_enode);
+ mte_to_mat(dead_enode)->next = NULL;
+ if (!mat->tail) {
+ mat->tail = mat->head = dead_enode;
+ return;
+ }
+
+ mte_to_mat(mat->tail)->next = dead_enode;
+ mat->tail = dead_enode;
+}
+
+static void mt_free_walk(struct rcu_head *head);
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ bool free);
+/*
+ * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
+ * @mas - the maple state
+ * @mat - the ma_topiary linked list of dead nodes to free.
+ *
+ * Destroy walk a dead list.
+ */
+static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
+{
+ struct maple_enode *next;
+ struct maple_node *node;
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ while (mat->head) {
+ next = mte_to_mat(mat->head)->next;
+ node = mte_to_node(mat->head);
+ mt_destroy_walk(mat->head, mas->tree, !in_rcu);
+ if (in_rcu)
+ call_rcu(&node->rcu, mt_free_walk);
+ mat->head = next;
+ }
+}
+/*
+ * mas_descend() - Descend into the slot stored in the ma_state.
+ * @mas - the maple state.
+ *
+ * Note: Not RCU safe, only use in write side or debug code.
+ */
+static inline void mas_descend(struct ma_state *mas)
+{
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+
+ if (mas->offset)
+ mas->min = pivots[mas->offset - 1] + 1;
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
+ mas->node = mas_slot(mas, slots, mas->offset);
+}
+
+/*
+ * mte_set_gap() - Set a maple node gap.
+ * @mn: The encoded maple node
+ * @gap: The offset of the gap to set
+ * @val: The gap value
+ */
+static inline void mte_set_gap(const struct maple_enode *mn,
+ unsigned char gap, unsigned long val)
+{
+ switch (mte_node_type(mn)) {
+ default:
+ break;
+ case maple_arange_64:
+ mte_to_node(mn)->ma64.gap[gap] = val;
+ break;
+ }
+}
+
+/*
+ * mas_ascend() - Walk up a level of the tree.
+ * @mas: The maple state
+ *
+ * Sets the @mas->max and @mas->min to the correct values when walking up. This
+ * may cause several levels of walking up to find the correct min and max.
+ * May find a dead node which will cause a premature return.
+ * Return: 1 on dead node, 0 otherwise
+ */
+static int mas_ascend(struct ma_state *mas)
+{
+ struct maple_enode *p_enode; /* parent enode. */
+ struct maple_enode *a_enode; /* ancestor enode. */
+ struct maple_node *a_node; /* ancestor node. */
+ struct maple_node *p_node; /* parent node. */
+ unsigned char a_slot;
+ enum maple_type a_type;
+ unsigned long min, max;
+ unsigned long *pivots;
+ bool set_max = false, set_min = false;
+
+ a_node = mas_mn(mas);
+ if (ma_is_root(a_node)) {
+ mas->offset = 0;
+ return 0;
+ }
+
+ p_node = mte_parent(mas->node);
+ if (unlikely(a_node == p_node))
+ return 1;
+
+ a_type = mas_parent_type(mas, mas->node);
+ mas->offset = mte_parent_slot(mas->node);
+ a_enode = mt_mk_node(p_node, a_type);
+
+ /* Check to make sure all parent information is still accurate */
+ if (p_node != mte_parent(mas->node))
+ return 1;
+
+ mas->node = a_enode;
+
+ if (mte_is_root(a_enode)) {
+ mas->max = ULONG_MAX;
+ mas->min = 0;
+ return 0;
+ }
+
+ min = 0;
+ max = ULONG_MAX;
+ if (!mas->offset) {
+ min = mas->min;
+ set_min = true;
+ }
+
+ if (mas->max == ULONG_MAX)
+ set_max = true;
+
+ do {
+ p_enode = a_enode;
+ a_type = mas_parent_type(mas, p_enode);
+ a_node = mte_parent(p_enode);
+ a_slot = mte_parent_slot(p_enode);
+ a_enode = mt_mk_node(a_node, a_type);
+ pivots = ma_pivots(a_node, a_type);
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
+
+ if (!set_min && a_slot) {
+ set_min = true;
+ min = pivots[a_slot - 1] + 1;
+ }
+
+ if (!set_max && a_slot < mt_pivots[a_type]) {
+ set_max = true;
+ max = pivots[a_slot];
+ }
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
+
+ if (unlikely(ma_is_root(a_node)))
+ break;
+
+ } while (!set_min || !set_max);
+
+ mas->max = max;
+ mas->min = min;
+ return 0;
+}
+
+/*
+ * mas_pop_node() - Get a previously allocated maple node from the maple state.
+ * @mas: The maple state
+ *
+ * Return: A pointer to a maple node.
+ */
+static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+{
+ struct maple_alloc *ret, *node = mas->alloc;
+ unsigned long total = mas_allocated(mas);
+ unsigned int req = mas_alloc_req(mas);
+
+ /* nothing or a request pending. */
+ if (WARN_ON(!total))
+ return NULL;
+
+ if (total == 1) {
+ /* single allocation in this ma_state */
+ mas->alloc = NULL;
+ ret = node;
+ goto single_node;
+ }
+
+ if (node->node_count == 1) {
+ /* Single allocation in this node. */
+ mas->alloc = node->slot[0];
+ mas->alloc->total = node->total - 1;
+ ret = node;
+ goto new_head;
+ }
+ node->total--;
+ ret = node->slot[--node->node_count];
+ node->slot[node->node_count] = NULL;
+
+single_node:
+new_head:
+ if (req) {
+ req++;
+ mas_set_alloc_req(mas, req);
+ }
+
+ memset(ret, 0, sizeof(*ret));
+ return (struct maple_node *)ret;
+}
+
+/*
+ * mas_push_node() - Push a node back on the maple state allocation.
+ * @mas: The maple state
+ * @used: The used maple node
+ *
+ * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
+ * requested node count as necessary.
+ */
+static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
+{
+ struct maple_alloc *reuse = (struct maple_alloc *)used;
+ struct maple_alloc *head = mas->alloc;
+ unsigned long count;
+ unsigned int requested = mas_alloc_req(mas);
+
+ count = mas_allocated(mas);
+
+ reuse->request_count = 0;
+ reuse->node_count = 0;
+ if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
+ head->slot[head->node_count++] = reuse;
+ head->total++;
+ goto done;
+ }
+
+ reuse->total = 1;
+ if ((head) && !((unsigned long)head & 0x1)) {
+ reuse->slot[0] = head;
+ reuse->node_count = 1;
+ reuse->total += head->total;
+ }
+
+ mas->alloc = reuse;
+done:
+ if (requested > 1)
+ mas_set_alloc_req(mas, requested - 1);
+}
+
+/*
+ * mas_alloc_nodes() - Allocate nodes into a maple state
+ * @mas: The maple state
+ * @gfp: The GFP Flags
+ */
+static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+{
+ struct maple_alloc *node;
+ unsigned long allocated = mas_allocated(mas);
+ unsigned int requested = mas_alloc_req(mas);
+ unsigned int count;
+ void **slots = NULL;
+ unsigned int max_req = 0;
+
+ if (!requested)
+ return;
+
+ mas_set_alloc_req(mas, 0);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
+ if (allocated)
+ return;
+ BUG_ON(!allocated);
+ WARN_ON(!allocated);
+ }
+
+ if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
+ node = (struct maple_alloc *)mt_alloc_one(gfp);
+ if (!node)
+ goto nomem_one;
+
+ if (allocated) {
+ node->slot[0] = mas->alloc;
+ node->node_count = 1;
+ } else {
+ node->node_count = 0;
+ }
+
+ mas->alloc = node;
+ node->total = ++allocated;
+ requested--;
+ }
+
+ node = mas->alloc;
+ node->request_count = 0;
+ while (requested) {
+ max_req = MAPLE_ALLOC_SLOTS - node->node_count;
+ slots = (void **)&node->slot[node->node_count];
+ max_req = min(requested, max_req);
+ count = mt_alloc_bulk(gfp, max_req, slots);
+ if (!count)
+ goto nomem_bulk;
+
+ if (node->node_count == 0) {
+ node->slot[0]->node_count = 0;
+ node->slot[0]->request_count = 0;
+ }
+
+ node->node_count += count;
+ allocated += count;
+ node = node->slot[0];
+ requested -= count;
+ }
+ mas->alloc->total = allocated;
+ return;
+
+nomem_bulk:
+ /* Clean up potential freed allocations on bulk failure */
+ memset(slots, 0, max_req * sizeof(unsigned long));
+nomem_one:
+ mas_set_alloc_req(mas, requested);
+ if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+ mas->alloc->total = allocated;
+ mas_set_err(mas, -ENOMEM);
+}
+
+/*
+ * mas_free() - Free an encoded maple node
+ * @mas: The maple state
+ * @used: The encoded maple node to free.
+ *
+ * Uses rcu free if necessary, pushes @used back on the maple state allocations
+ * otherwise.
+ */
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
+{
+ struct maple_node *tmp = mte_to_node(used);
+
+ if (mt_in_rcu(mas->tree))
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ * @gfp: the gfp flags
+ */
+static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
+{
+ unsigned long allocated = mas_allocated(mas);
+
+ if (allocated < count) {
+ mas_set_alloc_req(mas, count - allocated);
+ mas_alloc_nodes(mas, gfp);
+ }
+}
+
+/*
+ * mas_node_count() - Check if enough nodes are allocated and request more if
+ * there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ *
+ * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
+ */
+static void mas_node_count(struct ma_state *mas, int count)
+{
+ return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+/*
+ * mas_start() - Sets up maple state for operations.
+ * @mas: The maple state.
+ *
+ * If mas->status == ma_start, then set the min, max and depth to
+ * defaults.
+ *
+ * Return:
+ * - If mas->status is an error or not ma_start, return NULL.
+ * - If it's an empty tree: NULL & mas->status == ma_none
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->status == ma_active & mas->node == safe root node.
+ */
+static inline struct maple_enode *mas_start(struct ma_state *mas)
+{
+ if (likely(mas_is_start(mas))) {
+ struct maple_enode *root;
+
+ mas->min = 0;
+ mas->max = ULONG_MAX;
+
+retry:
+ mas->depth = 0;
+ root = mas_root(mas);
+ /* Tree with nodes */
+ if (likely(xa_is_node(root))) {
+ mas->depth = 1;
+ mas->status = ma_active;
+ mas->node = mte_safe_root(root);
+ mas->offset = 0;
+ if (mte_dead_node(mas->node))
+ goto retry;
+
+ return NULL;
+ }
+
+ /* empty tree */
+ if (unlikely(!root)) {
+ mas->node = NULL;
+ mas->status = ma_none;
+ mas->offset = MAPLE_NODE_SLOTS;
+ return NULL;
+ }
+
+ /* Single entry tree */
+ mas->status = ma_root;
+ mas->offset = MAPLE_NODE_SLOTS;
+
+ /* Single entry tree. */
+ if (mas->index > 0)
+ return NULL;
+
+ return root;
+ }
+
+ return NULL;
+}
+
+/*
+ * ma_data_end() - Find the end of the data in a node.
+ * @node: The maple node
+ * @type: The maple node type
+ * @pivots: The array of pivots in the node
+ * @max: The maximum value in the node
+ *
+ * Uses metadata to find the end of the data when possible.
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static __always_inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type, unsigned long *pivots, unsigned long max)
+{
+ unsigned char offset;
+
+ if (!pivots)
+ return 0;
+
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_data_end() - Find the end of the data (slot).
+ * @mas: the maple state
+ *
+ * This method is optimized to check the metadata of a node if the node type
+ * supports data end metadata.
+ *
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static inline unsigned char mas_data_end(struct ma_state *mas)
+{
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned char offset;
+ unsigned long *pivots;
+
+ type = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ pivots = ma_pivots(node, type);
+ if (unlikely(ma_dead_node(node)))
+ return 0;
+
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == mas->max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_leaf_max_gap() - Returns the largest gap in a leaf node
+ * @mas - the maple state
+ *
+ * Return: The maximum gap in the leaf.
+ */
+static unsigned long mas_leaf_max_gap(struct ma_state *mas)
+{
+ enum maple_type mt;
+ unsigned long pstart, gap, max_gap;
+ struct maple_node *mn;
+ unsigned long *pivots;
+ void __rcu **slots;
+ unsigned char i;
+ unsigned char max_piv;
+
+ mt = mte_node_type(mas->node);
+ mn = mas_mn(mas);
+ slots = ma_slots(mn, mt);
+ max_gap = 0;
+ if (unlikely(ma_is_dense(mt))) {
+ gap = 0;
+ for (i = 0; i < mt_slots[mt]; i++) {
+ if (slots[i]) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ } else {
+ gap++;
+ }
+ }
+ if (gap > max_gap)
+ max_gap = gap;
+ return max_gap;
+ }
+
+ /*
+ * Checking the first implied pivot optimizes the loop below, and slot 1 may
+ * be skipped if there is a gap in slot 0.
+ */
+ pivots = ma_pivots(mn, mt);
+ if (likely(!slots[0])) {
+ max_gap = pivots[0] - mas->min + 1;
+ i = 2;
+ } else {
+ i = 1;
+ }
+
+ /* reduce max_piv as the special case is checked before the loop */
+ max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
+ /*
+ * Check end implied pivot which can only be a gap on the right most
+ * node.
+ */
+ if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
+ gap = ULONG_MAX - pivots[max_piv];
+ if (gap > max_gap)
+ max_gap = gap;
+
+ if (max_gap > pivots[max_piv] - mas->min)
+ return max_gap;
+ }
+
+ for (; i <= max_piv; i++) {
+ /* data == no gap. */
+ if (likely(slots[i]))
+ continue;
+
+ pstart = pivots[i - 1];
+ gap = pivots[i] - pstart;
+ if (gap > max_gap)
+ max_gap = gap;
+
+ /* There cannot be two gaps in a row. */
+ i++;
+ }
+ return max_gap;
+}
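A short worked example of the leaf scan above (editorial, not from the source): for a leaf with mas->min = 0, mas->max = 100, pivots {9, 19, 49, ...} and entries only in slots 1 and 3, the empty slot 0 yields an initial gap of pivots[0] - mas->min + 1 = 10, and the loop then finds the empty slot 2 gap of pivots[2] - pivots[1] = 30 (the 30 values in [20, 49]), which becomes the returned maximum.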
+
+/*
+ * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
+ * @node: The maple node
+ * @gaps: The pointer to the gaps
+ * @mt: The maple node type
+ * @*off: Pointer to store the offset location of the gap.
+ *
+ * Uses the metadata data end to scan backwards across set gaps.
+ *
+ * Return: The maximum gap value
+ */
+static inline unsigned long
+ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
+ unsigned char *off)
+{
+ unsigned char offset, i;
+ unsigned long max_gap = 0;
+
+ i = offset = ma_meta_end(node, mt);
+ do {
+ if (gaps[i] > max_gap) {
+ max_gap = gaps[i];
+ offset = i;
+ }
+ } while (i--);
+
+ *off = offset;
+ return max_gap;
+}
+
+/*
+ * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
+ * @mas: The maple state.
+ *
+ * Return: The gap value.
+ */
+static inline unsigned long mas_max_gap(struct ma_state *mas)
+{
+ unsigned long *gaps;
+ unsigned char offset;
+ enum maple_type mt;
+ struct maple_node *node;
+
+ mt = mte_node_type(mas->node);
+ if (ma_is_leaf(mt))
+ return mas_leaf_max_gap(mas);
+
+ node = mas_mn(mas);
+ MAS_BUG_ON(mas, mt != maple_arange_64);
+ offset = ma_meta_gap(node);
+ gaps = ma_gaps(node, mt);
+ return gaps[offset];
+}
+
+/*
+ * mas_parent_gap() - Set the parent gap and any gaps above, as needed
+ * @mas: The maple state
+ * @offset: The gap offset in the parent to set
+ * @new: The new gap value.
+ *
+ * Set the parent gap then continue to set the gap upwards, using the metadata
+ * of the parent to see if it is necessary to check the node above.
+ */
+static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
+ unsigned long new)
+{
+ unsigned long meta_gap = 0;
+ struct maple_node *pnode;
+ struct maple_enode *penode;
+ unsigned long *pgaps;
+ unsigned char meta_offset;
+ enum maple_type pmt;
+
+ pnode = mte_parent(mas->node);
+ pmt = mas_parent_type(mas, mas->node);
+ penode = mt_mk_node(pnode, pmt);
+ pgaps = ma_gaps(pnode, pmt);
+
+ascend:
+ MAS_BUG_ON(mas, pmt != maple_arange_64);
+ meta_offset = ma_meta_gap(pnode);
+ meta_gap = pgaps[meta_offset];
+
+ pgaps[offset] = new;
+
+ if (meta_gap == new)
+ return;
+
+ if (offset != meta_offset) {
+ if (meta_gap > new)
+ return;
+
+ ma_set_meta_gap(pnode, pmt, offset);
+ } else if (new < meta_gap) {
+ new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
+ ma_set_meta_gap(pnode, pmt, meta_offset);
+ }
+
+ if (ma_is_root(pnode))
+ return;
+
+ /* Go to the parent node. */
+ pnode = mte_parent(penode);
+ pmt = mas_parent_type(mas, penode);
+ pgaps = ma_gaps(pnode, pmt);
+ offset = mte_parent_slot(penode);
+ penode = mt_mk_node(pnode, pmt);
+ goto ascend;
+}
+
+/*
+ * mas_update_gap() - Update a node's gaps and propagate up if necessary.
+ * @mas - the maple state.
+ */
+static inline void mas_update_gap(struct ma_state *mas)
+{
+ unsigned char pslot;
+ unsigned long p_gap;
+ unsigned long max_gap;
+
+ if (!mt_is_alloc(mas->tree))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ max_gap = mas_max_gap(mas);
+
+ pslot = mte_parent_slot(mas->node);
+ p_gap = ma_gaps(mte_parent(mas->node),
+ mas_parent_type(mas, mas->node))[pslot];
+
+ if (p_gap != max_gap)
+ mas_parent_gap(mas, pslot, max_gap);
+}
+
+/*
+ * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
+ * @parent with the slot encoded.
+ * @mas - the maple state (for the tree)
+ * @parent - the maple encoded node containing the children.
+ */
+static inline void mas_adopt_children(struct ma_state *mas,
+ struct maple_enode *parent)
+{
+ enum maple_type type = mte_node_type(parent);
+ struct maple_node *node = mte_to_node(parent);
+ void __rcu **slots = ma_slots(node, type);
+ unsigned long *pivots = ma_pivots(node, type);
+ struct maple_enode *child;
+ unsigned char offset;
+
+ offset = ma_data_end(node, type, pivots, mas->max);
+ do {
+ child = mas_slot_locked(mas, slots, offset);
+ mas_set_parent(mas, child, parent, offset);
+ } while (offset--);
+}
+
+/*
+ * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
+ * node as dead.
+ * @mas - the maple state with the new node
+ * @old_enode - The old maple encoded node to replace.
+ */
+static inline void mas_put_in_tree(struct ma_state *mas,
+ struct maple_enode *old_enode)
+ __must_hold(mas->tree->ma_lock)
+{
+ unsigned char offset;
+ void __rcu **slots;
+
+ if (mte_is_root(mas->node)) {
+ mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ mas_set_height(mas);
+ } else {
+
+ offset = mte_parent_slot(mas->node);
+ slots = ma_slots(mte_parent(mas->node),
+ mas_parent_type(mas, mas->node));
+ rcu_assign_pointer(slots[offset], mas->node);
+ }
+
+ mte_set_node_dead(old_enode);
+}
+
+/*
+ * mas_replace_node() - Replace a node by putting it in the tree, marking it
+ * dead, and freeing it. Uses the parent encoding to locate the maple node in
+ * the tree.
+ * @mas - the ma_state with @mas->node pointing to the new node.
+ * @old_enode - The old maple encoded node.
+ */
+static inline void mas_replace_node(struct ma_state *mas,
+ struct maple_enode *old_enode)
+ __must_hold(mas->tree->ma_lock)
+{
+ mas_put_in_tree(mas, old_enode);
+ mas_free(mas, old_enode);
+}
+
+/*
+ * mas_find_child() - Find a child whose parent is @mas->node.
+ * @mas: the maple state with the parent.
+ * @child: the maple state to store the child.
+ *
+ * Return: true if a child was found, false otherwise.
+ */
+static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
+ __must_hold(mas->tree->ma_lock)
+{
+ enum maple_type mt;
+ unsigned char offset;
+ unsigned char end;
+ unsigned long *pivots;
+ struct maple_enode *entry;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ end = ma_data_end(node, mt, pivots, mas->max);
+ for (offset = mas->offset; offset <= end; offset++) {
+ entry = mas_slot_locked(mas, slots, offset);
+ if (mte_parent(entry) == node) {
+ *child = *mas;
+ mas->offset = offset + 1;
+ child->offset = offset;
+ mas_descend(child);
+ child->offset = 0;
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
+ * old data or set b_node->b_end.
+ * @b_node: the maple_big_node
+ * @shift: the shift count
+ */
+static inline void mab_shift_right(struct maple_big_node *b_node,
+ unsigned char shift)
+{
+ unsigned long size = b_node->b_end * sizeof(unsigned long);
+
+ memmove(b_node->pivot + shift, b_node->pivot, size);
+ memmove(b_node->slot + shift, b_node->slot, size);
+ if (b_node->type == maple_arange_64)
+ memmove(b_node->gap + shift, b_node->gap, size);
+}
+
+/*
+ * mab_middle_node() - Check if a middle node is needed (unlikely)
+ * @b_node: the maple_big_node that contains the data.
+ * @split: the potential split location
+ * @slot_count: the size that can be stored in a single node being considered.
+ *
+ * Return: true if a middle node is required.
+ */
+static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
+ unsigned char slot_count)
+{
+ unsigned char size = b_node->b_end;
+
+ if (size >= 2 * slot_count)
+ return true;
+
+ if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
+ return true;
+
+ return false;
+}
+
+/*
+ * mab_no_null_split() - ensure the split doesn't fall on a NULL
+ * @b_node: the maple_big_node with the data
+ * @split: the suggested split location
+ * @slot_count: the number of slots in the node being considered.
+ *
+ * Return: the split location.
+ */
+static inline int mab_no_null_split(struct maple_big_node *b_node,
+ unsigned char split, unsigned char slot_count)
+{
+ if (!b_node->slot[split]) {
+ /*
+ * If the split is less than the max slot && the right side will
+ * still be sufficient, then increment the split on NULL.
+ */
+ if ((split < slot_count - 1) &&
+ (b_node->b_end - split) > (mt_min_slots[b_node->type]))
+ split++;
+ else
+ split--;
+ }
+ return split;
+}
+
+/*
+ * mab_calc_split() - Calculate the split location and if there needs to be two
+ * splits.
+ * @mas: The maple state
+ * @bn: The maple_big_node with the data
+ * @mid_split: The second split, if required. 0 otherwise.
+ * @min: The minimum index covered by the data in @bn
+ *
+ * Return: The first split location. The middle split is set in @mid_split.
+ */
+static inline int mab_calc_split(struct ma_state *mas,
+ struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+{
+ unsigned char b_end = bn->b_end;
+ int split = b_end / 2; /* Assume equal split. */
+ unsigned char slot_min, slot_count = mt_slots[bn->type];
+
+ /*
+ * To support gap tracking, all NULL entries are kept together and a node cannot
+ * end on a NULL entry, with the exception of the left-most leaf. The
+ * limitation means that the split of a node must be checked for this condition
+ * and be able to put more data in one direction or the other.
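+ * When the computed split would land on a NULL entry it is nudged forward or
+ * backward (see mab_no_null_split(), used at the end of this function).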
+ */
+ if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
+ *mid_split = 0;
+ split = b_end - mt_min_slots[bn->type];
+
+ if (!ma_is_leaf(bn->type))
+ return split;
+
+ mas->mas_flags |= MA_STATE_REBALANCE;
+ if (!bn->slot[split])
+ split--;
+ return split;
+ }
+
+ /*
+ * Although extremely rare, it is possible to enter what is known as the 3-way
+ * split scenario. The 3-way split comes about by means of a store of a range
+ * that overwrites the end and beginning of two full nodes. The result is a set
+ * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
+ * also be located in different parent nodes which are also full. This can
+ * carry upwards all the way to the root in the worst case.
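+ * Illustrative example, assuming 16 slots per node: two adjacent full leaves
+ * hold 32 entries; a store spanning the last entry of the left leaf and the
+ * first entry of the right leaf can truncate both of those entries and add
+ * the new range, leaving up to 33 entries - one more than two nodes can
+ * hold, hence the third (middle) node.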
+ */
+ if (unlikely(mab_middle_node(bn, split, slot_count))) {
+ split = b_end / 3;
+ *mid_split = split * 2;
+ } else {
+ slot_min = mt_min_slots[bn->type];
+
+ *mid_split = 0;
+ /*
+ * Avoid having a range less than the slot count unless it
+ * causes one node to be deficient.
+ * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
+ */
+ while ((split < slot_count - 1) &&
+ ((bn->pivot[split] - min) < slot_count - 1) &&
+ (b_end - split > slot_min))
+ split++;
+ }
+
+ /* Avoid ending a node on a NULL entry */
+ split = mab_no_null_split(bn, split, slot_count);
+
+ if (unlikely(*mid_split))
+ *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
+
+ return split;
+}
+
+/*
+ * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
+ * and set @b_node->b_end to the next free slot.
+ * @mas: The maple state
+ * @mas_start: The starting slot to copy
+ * @mas_end: The end slot to copy (inclusively)
+ * @b_node: The maple_big_node to place the data
+ * @mab_start: The starting location in maple_big_node to store the data.
+ */
+static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
+ unsigned char mas_end, struct maple_big_node *b_node,
+ unsigned char mab_start)
+{
+ enum maple_type mt;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots, *gaps;
+ int i = mas_start, j = mab_start;
+ unsigned char piv_end;
+
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ if (!i) {
+ b_node->pivot[j] = pivots[i++];
+ if (unlikely(i > mas_end))
+ goto complete;
+ j++;
+ }
+
+ piv_end = min(mas_end, mt_pivots[mt]);
+ for (; i < piv_end; i++, j++) {
+ b_node->pivot[j] = pivots[i];
+ if (unlikely(!b_node->pivot[j]))
+ break;
+
+ if (unlikely(mas->max == b_node->pivot[j]))
+ goto complete;
+ }
+
+ if (likely(i <= mas_end))
+ b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
+
+complete:
+ b_node->b_end = ++j;
+ j -= mab_start;
+ slots = ma_slots(node, mt);
+ memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
+ if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
+ gaps = ma_gaps(node, mt);
+ memcpy(b_node->gap + mab_start, gaps + mas_start,
+ sizeof(unsigned long) * j);
+ }
+}
+
+/*
+ * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
+ * @node: The maple node
+ * @mt: The maple type
+ * @end: The node end
+ */
+static inline void mas_leaf_set_meta(struct maple_node *node,
+ enum maple_type mt, unsigned char end)
+{
+ if (end < mt_slots[mt] - 1)
+ ma_set_meta(node, mt, 0, end);
+}
+
+/*
+ * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
+ * @b_node: the maple_big_node that has the data
+ * @mab_start: the start location in @b_node.
+ * @mab_end: The end location in @b_node (inclusively)
+ * @mas: The maple state with the maple encoded node.
+ * @new_max: If true, update @mas->max to the last pivot copied.
+ */
+static inline void mab_mas_cp(struct maple_big_node *b_node,
+ unsigned char mab_start, unsigned char mab_end,
+ struct ma_state *mas, bool new_max)
+{
+ int i, j = 0;
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node *node = mte_to_node(mas->node);
+ void __rcu **slots = ma_slots(node, mt);
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned long *gaps = NULL;
+ unsigned char end;
+
+ if (mab_end - mab_start > mt_pivots[mt])
+ mab_end--;
+
+ if (!pivots[mt_pivots[mt] - 1])
+ slots[mt_pivots[mt]] = NULL;
+
+ i = mab_start;
+ do {
+ pivots[j++] = b_node->pivot[i++];
+ } while (i <= mab_end && likely(b_node->pivot[i]));
+
+ memcpy(slots, b_node->slot + mab_start,
+ sizeof(void *) * (i - mab_start));
+
+ if (new_max)
+ mas->max = b_node->pivot[i - 1];
+
+ end = j - 1;
+ if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
+ unsigned long max_gap = 0;
+ unsigned char offset = 0;
+
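+ /* Copy the gaps back and track the largest for the node metadata. */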
+ gaps = ma_gaps(node, mt);
+ do {
+ gaps[--j] = b_node->gap[--i];
+ if (gaps[j] > max_gap) {
+ offset = j;
+ max_gap = gaps[j];
+ }
+ } while (j);
+
+ ma_set_meta(node, mt, offset, end);
+ } else {
+ mas_leaf_set_meta(node, mt, end);
+ }
+}
+
+/*
+ * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
+ * @mas: The maple state
+ * @end: The maple node end
+ * @mt: The maple node type
+ */
+static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
+ enum maple_type mt)
+{
+ if (!(mas->mas_flags & MA_STATE_BULK))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ if (end > mt_min_slots[mt]) {
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ return;
+ }
+}
+
+/*
+ * mas_store_b_node() - Store an @entry into the b_node while also copying the
+ * data from a maple encoded node.
+ * @wr_mas: the maple write state
+ * @b_node: the maple_big_node to fill with data
+ * @offset_end: the offset to end copying
+ *
+ * Sets @b_node->b_end to the actual end of the data stored.
+ */
+static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char offset_end)
+{
+ unsigned char slot;
+ unsigned char b_end;
+ /* Possible underflow of piv will wrap back to 0 before use. */
+ unsigned long piv;
+ struct ma_state *mas = wr_mas->mas;
+
+ b_node->type = wr_mas->type;
+ b_end = 0;
+ slot = mas->offset;
+ if (slot) {
+ /* Copy start data up to insert. */
+ mas_mab_cp(mas, 0, slot - 1, b_node, 0);
+ b_end = b_node->b_end;
+ piv = b_node->pivot[b_end - 1];
+ } else
+ piv = mas->min - 1;
+
+ if (piv + 1 < mas->index) {
+ /* Handle range starting after old range */
+ b_node->slot[b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = mas->index - 1 - piv;
+ b_node->pivot[b_end++] = mas->index - 1;
+ }
+
+ /* Store the new entry. */
+ mas->offset = b_end;
+ b_node->slot[b_end] = wr_mas->entry;
+ b_node->pivot[b_end] = mas->last;
+
+ /* Appended. */
+ if (mas->last >= mas->max)
+ goto b_end;
+
+ /* Handle new range ending before old range ends */
+ piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
+ if (piv > mas->last) {
+ if (piv == ULONG_MAX)
+ mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
+
+ if (offset_end != slot)
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ offset_end);
+
+ b_node->slot[++b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = piv - mas->last + 1;
+ b_node->pivot[b_end] = piv;
+ }
+
+ slot = offset_end + 1;
+ if (slot > mas->end)
+ goto b_end;
+
+ /* Copy end data to the end of the node. */
+ mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
+ b_node->b_end--;
+ return;
+
+b_end:
+ b_node->b_end = b_end;
+}
+
+/*
+ * mas_prev_sibling() - Find the previous node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: True if there is a previous sibling, false otherwise.
+ */
+static inline bool mas_prev_sibling(struct ma_state *mas)
+{
+ unsigned int p_slot = mte_parent_slot(mas->node);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ if (!p_slot)
+ return false;
+
+ mas_ascend(mas);
+ mas->offset = p_slot - 1;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mas_next_sibling() - Find the next node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: true if there is a next sibling, false otherwise.
+ */
+static inline bool mas_next_sibling(struct ma_state *mas)
+{
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ parent = *mas;
+ mas_ascend(&parent);
+ parent.offset = mte_parent_slot(mas->node) + 1;
+ if (parent.offset > mas_data_end(&parent))
+ return false;
+
+ *mas = parent;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mas_node_or_none() - Set the node and status of the maple state.
+ * @mas: The maple state.
+ * @enode: The encoded maple node, or NULL.
+ *
+ * Set @mas->node to @enode and the status to active, or to none if @enode is
+ * NULL.
+ */
+static inline void mas_node_or_none(struct ma_state *mas,
+ struct maple_enode *enode)
+{
+ if (enode) {
+ mas->node = enode;
+ mas->status = ma_active;
+ } else {
+ mas->node = NULL;
+ mas->status = ma_none;
+ }
+}
+
+/*
+ * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ */
+static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char count, offset;
+
+ if (unlikely(ma_is_dense(wr_mas->type))) {
+ wr_mas->r_max = wr_mas->r_min = mas->index;
+ mas->offset = mas->index = mas->min;
+ return;
+ }
+
+ wr_mas->node = mas_mn(wr_mas->mas);
+ wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
+ count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
+ offset = mas->offset;
+
+ while (offset < count && mas->index > wr_mas->pivots[offset])
+ offset++;
+
+ wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
+ wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
+ wr_mas->offset_end = mas->offset = offset;
+}
+
+/*
+ * mast_rebalance_next() - Rebalance against the next node
+ * @mast: The maple subtree state
+ */
+static inline void mast_rebalance_next(struct maple_subtree_state *mast)
+{
+ unsigned char b_end = mast->bn->b_end;
+
+ mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
+ mast->bn, b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_rebalance_prev() - Rebalance against the previous node
+ * @mast: The maple subtree state
+ */
+static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
+{
+ unsigned char end = mas_data_end(mast->orig_l) + 1;
+ unsigned char b_end = mast->bn->b_end;
+
+ mab_shift_right(mast->bn, end);
+ mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
+ mast->l->min = mast->orig_l->min;
+ mast->orig_l->index = mast->orig_l->min;
+ mast->bn->b_end = end + b_end;
+ mast->l->offset += end;
+}
+
+/*
+ * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
+ * favouring the node to the right. Check the nodes to the right, then the
+ * left, at each level upwards until the root is reached.
+ * Data is copied into @mast->bn.
+ * @mast: The maple_subtree_state.
+ *
+ * Return: true if a neighbour was found and its data copied into @mast->bn,
+ * false otherwise.
+ */
+static inline
+bool mast_spanning_rebalance(struct maple_subtree_state *mast)
+{
+ struct ma_state r_tmp = *mast->orig_r;
+ struct ma_state l_tmp = *mast->orig_l;
+ unsigned char depth = 0;
+
+ do {
+ mas_ascend(mast->orig_r);
+ mas_ascend(mast->orig_l);
+ depth++;
+ if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
+ mast->orig_r->offset++;
+ do {
+ mas_descend(mast->orig_r);
+ mast->orig_r->offset = 0;
+ } while (--depth);
+
+ mast_rebalance_next(mast);
+ *mast->orig_l = l_tmp;
+ return true;
+ } else if (mast->orig_l->offset != 0) {
+ mast->orig_l->offset--;
+ do {
+ mas_descend(mast->orig_l);
+ mast->orig_l->offset =
+ mas_data_end(mast->orig_l);
+ } while (--depth);
+
+ mast_rebalance_prev(mast);
+ *mast->orig_r = r_tmp;
+ return true;
+ }
+ } while (!mte_is_root(mast->orig_r->node));
+
+ *mast->orig_r = r_tmp;
+ *mast->orig_l = l_tmp;
+ return false;
+}
+
+/*
+ * mast_ascend() - Ascend the original left and right maple states.
+ * @mast: the maple subtree state.
+ *
+ * Ascend the original left and right sides. Set the offsets to point to the
+ * data already in the new tree (@mast->l and @mast->r).
+ */
+static inline void mast_ascend(struct maple_subtree_state *mast)
+{
+ MA_WR_STATE(wr_mas, mast->orig_r, NULL);
+ mas_ascend(mast->orig_l);
+ mas_ascend(mast->orig_r);
+
+ mast->orig_r->offset = 0;
+ mast->orig_r->index = mast->r->max;
+ /* last should be larger than or equal to index */
+ if (mast->orig_r->last < mast->orig_r->index)
+ mast->orig_r->last = mast->orig_r->index;
+
+ wr_mas.type = mte_node_type(mast->orig_r->node);
+ mas_wr_node_walk(&wr_mas);
+ /* Set up the left side of things */
+ mast->orig_l->offset = 0;
+ mast->orig_l->index = mast->l->min;
+ wr_mas.mas = mast->orig_l;
+ wr_mas.type = mte_node_type(mast->orig_l->node);
+ mas_wr_node_walk(&wr_mas);
+
+ mast->bn->type = wr_mas.type;
+}
+
+/*
+ * mas_new_ma_node() - Create and return a new maple node. Helper function.
+ * @mas: the maple state with the allocations.
+ * @b_node: the maple_big_node with the type encoding.
+ *
+ * Use the node type from the maple_big_node to allocate a new node from the
+ * ma_state. This function exists mainly for code readability.
+ *
+ * Return: A new maple encoded node
+ */
+static inline struct maple_enode
+*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
+{
+ return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
+}
+
+/*
+ * mas_mab_to_node() - Set up right and middle nodes
+ *
+ * @mas: the maple state that contains the allocations.
+ * @b_node: the node which contains the data.
+ * @left: The pointer which will have the left node
+ * @right: The pointer which may have the right node
+ * @middle: the pointer which may have the middle node (rare)
+ * @mid_split: the split location for the middle node
+ * @min: the minimum index covered by the data in @b_node
+ *
+ * Return: the split of left.
+ */
+static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ struct maple_big_node *b_node, struct maple_enode **left,
+ struct maple_enode **right, struct maple_enode **middle,
+ unsigned char *mid_split, unsigned long min)
+{
+ unsigned char split = 0;
+ unsigned char slot_count = mt_slots[b_node->type];
+
+ *left = mas_new_ma_node(mas, b_node);
+ *right = NULL;
+ *middle = NULL;
+ *mid_split = 0;
+
+ if (b_node->b_end < slot_count) {
+ split = b_node->b_end;
+ } else {
+ split = mab_calc_split(mas, b_node, mid_split, min);
+ *right = mas_new_ma_node(mas, b_node);
+ }
+
+ if (*mid_split)
+ *middle = mas_new_ma_node(mas, b_node);
+
+ return split;
+}
+
+/*
+ * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
+ * pointer.
+ * @b_node - the big node to add the entry
+ * @mas - the maple state to get the pivot (mas->max)
+ * @entry - the entry to add, if NULL nothing happens.
+ */
+static inline void mab_set_b_end(struct maple_big_node *b_node,
+ struct ma_state *mas,
+ void *entry)
+{
+ if (!entry)
+ return;
+
+ b_node->slot[b_node->b_end] = entry;
+ if (mt_is_alloc(mas->tree))
+ b_node->gap[b_node->b_end] = mas_max_gap(mas);
+ b_node->pivot[b_node->b_end++] = mas->max;
+}
+
+/*
+ * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
+ * of @mas->node to either @left or @right, depending on @slot and @split
+ *
+ * @mas - the maple state with the node that needs a parent
+ * @left - possible parent 1
+ * @right - possible parent 2
+ * @slot - the slot the mas->node was placed
+ * @split - the split location between @left and @right
+ */
+static inline void mas_set_split_parent(struct ma_state *mas,
+ struct maple_enode *left,
+ struct maple_enode *right,
+ unsigned char *slot, unsigned char split)
+{
+ if (mas_is_none(mas))
+ return;
+
+ if ((*slot) <= split)
+ mas_set_parent(mas, mas->node, left, *slot);
+ else if (right)
+ mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
+
+ (*slot)++;
+}
+
+/*
+ * mte_mid_split_check() - Check if the next node passes the mid-split
+ * @l: Pointer to the left encoded maple node.
+ * @r: Pointer to the right encoded maple node.
+ * @right: The encoded maple node to the far right.
+ * @slot: The offset
+ * @split: Pointer to the split location.
+ * @mid_split: The middle split.
+ */
+static inline void mte_mid_split_check(struct maple_enode **l,
+ struct maple_enode **r,
+ struct maple_enode *right,
+ unsigned char slot,
+ unsigned char *split,
+ unsigned char mid_split)
+{
+ if (*r == right)
+ return;
+
+ if (slot < mid_split)
+ return;
+
+ *l = *r;
+ *r = right;
+ *split = mid_split;
+}
+
+/*
+ * mast_set_split_parents() - Helper function to set three nodes' parents. The
+ * slot is taken from @mast->l.
+ * @mast - the maple subtree state
+ * @left - the left node
+ * @middle - the middle node (may be NULL)
+ * @right - the right node
+ * @split - the split location
+ * @mid_split - the middle split location
+ */
+static inline void mast_set_split_parents(struct maple_subtree_state *mast,
+ struct maple_enode *left,
+ struct maple_enode *middle,
+ struct maple_enode *right,
+ unsigned char split,
+ unsigned char mid_split)
+{
+ unsigned char slot;
+ struct maple_enode *l = left;
+ struct maple_enode *r = right;
+
+ if (mas_is_none(mast->l))
+ return;
+
+ if (middle)
+ r = middle;
+
+ slot = mast->l->offset;
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->l, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->m, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->r, l, r, &slot, split);
+}
+
+/*
+ * mas_topiary_node() - Dispose of a single node
+ * @mas: The maple state for pushing nodes
+ * @tmp_mas: The maple state of the node to dispose of
+ * @in_rcu: If the tree is in rcu mode
+ *
+ * The node will either be RCU freed or pushed back on the maple state.
+ */
+static inline void mas_topiary_node(struct ma_state *mas,
+ struct ma_state *tmp_mas, bool in_rcu)
+{
+ struct maple_node *tmp;
+ struct maple_enode *enode;
+
+ if (mas_is_none(tmp_mas))
+ return;
+
+ enode = tmp_mas->node;
+ tmp = mte_to_node(enode);
+ mte_set_node_dead(enode);
+ if (in_rcu)
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_topiary_replace() - Replace the data with new data, then repair the
+ * parent links within the new tree. Iterate over the dead sub-tree and collect
+ * the dead subtrees and topiary the nodes that are no longer of use.
+ *
+ * The new tree will have up to three children with the correct parent. Keep
+ * track of the new entries as they need to be followed to find the next level
+ * of new entries.
+ *
+ * The old tree will have up to three children with the old parent. Keep track
+ * of the old entries as they may have more nodes below replaced. Nodes within
+ * [index, last] are dead subtrees, others need to be freed and followed.
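+ *
+ * The three-wide tmp/tmp_next arrays below exist because, as noted above, at
+ * most three new (or old) child nodes need to be tracked at each level while
+ * walking down either tree.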
+ *
+ * @mas: The maple state pointing at the new data
+ * @old_enode: The maple encoded node being replaced
+ *
+ */
+static inline void mas_topiary_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
+{
+ struct ma_state tmp[3], tmp_next[3];
+ MA_TOPIARY(subtrees, mas->tree);
+ bool in_rcu;
+ int i, n;
+
+ /* Place data in tree & then mark node as old */
+ mas_put_in_tree(mas, old_enode);
+
+ /* Update the parent pointers in the tree */
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
+ while (!mte_is_leaf(tmp[0].node)) {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
+
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+ n++;
+ }
+
+ mas_adopt_children(&tmp[i], tmp[i].node);
+ }
+
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].status = ma_none;
+
+ for (i = 0; i < 3; i++)
+ tmp[i] = tmp_next[i];
+ }
+
+ /* Collect the old nodes that need to be discarded */
+ if (mte_is_leaf(old_enode))
+ return mas_free(mas, old_enode);
+
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[0].node = old_enode;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
+ in_rcu = mt_in_rcu(mas->tree);
+ do {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
+
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+
+ if ((tmp_next[n].min >= tmp_next->index) &&
+ (tmp_next[n].max <= tmp_next->last)) {
+ mat_add(&subtrees, tmp_next[n].node);
+ tmp_next[n].status = ma_none;
+ } else {
+ n++;
+ }
+ }
+ }
+
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].status = ma_none;
+
+ for (i = 0; i < 3; i++) {
+ mas_topiary_node(mas, &tmp[i], in_rcu);
+ tmp[i] = tmp_next[i];
+ }
+ } while (!mte_is_leaf(tmp[0].node));
+
+ for (i = 0; i < 3; i++)
+ mas_topiary_node(mas, &tmp[i], in_rcu);
+
+ mas_mat_destroy(mas, &subtrees);
+}
+
+/*
+ * mas_wmb_replace() - Write memory barrier and replace
+ * @mas: The maple state
+ * @old_enode: The old maple encoded node that is being replaced.
+ *
+ * Updates gap as necessary.
+ */
+static inline void mas_wmb_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
+{
+ /* Insert the new data in the tree */
+ mas_topiary_replace(mas, old_enode);
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mast_cp_to_nodes() - Copy data out to nodes.
+ * @mast: The maple subtree state
+ * @left: The left encoded maple node
+ * @middle: The middle encoded maple node
+ * @right: The right encoded maple node
+ * @split: The location to split between left and (middle ? middle : right)
+ * @mid_split: The location to split between middle and right.
+ */
+static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
+ struct maple_enode *left, struct maple_enode *middle,
+ struct maple_enode *right, unsigned char split, unsigned char mid_split)
+{
+ bool new_lmax = true;
+
+ mas_node_or_none(mast->l, left);
+ mas_node_or_none(mast->m, middle);
+ mas_node_or_none(mast->r, right);
+
+ mast->l->min = mast->orig_l->min;
+ if (split == mast->bn->b_end) {
+ mast->l->max = mast->orig_r->max;
+ new_lmax = false;
+ }
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
+
+ if (middle) {
+ mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
+ mast->m->min = mast->bn->pivot[split] + 1;
+ split = mid_split;
+ }
+
+ mast->r->max = mast->orig_r->max;
+ if (right) {
+ mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
+ mast->r->min = mast->bn->pivot[split] + 1;
+ }
+}
+
+/*
+ * mast_combine_cp_left - Copy in the original left side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
+{
+ unsigned char l_slot = mast->orig_l->offset;
+
+ if (!l_slot)
+ return;
+
+ mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
+}
+
+/*
+ * mast_combine_cp_right: Copy in the original right side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
+{
+ if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
+ return;
+
+ mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
+ mt_slot_count(mast->orig_r->node), mast->bn,
+ mast->bn->b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_sufficient: Check if the maple subtree state has enough data in the big
+ * node to create at least one sufficient node
+ * @mast: the maple subtree state
+ */
+static inline bool mast_sufficient(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
+/*
+ * mast_overflow: Check if there is too much data in the subtree state for a
+ * single node.
+ * @mast: The maple subtree state
+ */
+static inline bool mast_overflow(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
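+/*
+ * mtree_range_walk() - Walk from mas->node down to the leaf containing
+ * mas->index, recording the range found.
+ * @mas: The maple state.
+ *
+ * On success, mas->node is the leaf, [mas->index, mas->last] is the range of
+ * the entry found and [mas->min, mas->max] is the range of the leaf node.
+ *
+ * Return: The entry at mas->index, or NULL after resetting the maple state
+ * if a dead node was encountered.
+ */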
+static inline void *mtree_range_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next, *last;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+ unsigned long max, min;
+ unsigned long prev_max, prev_min;
+
+ next = mas->node;
+ min = mas->min;
+ max = mas->max;
+ do {
+ last = next;
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = ma_data_end(node, type, pivots, max);
+ prev_min = min;
+ prev_max = max;
+ if (pivots[0] >= mas->index) {
+ offset = 0;
+ max = pivots[0];
+ goto next;
+ }
+
+ offset = 1;
+ while (offset < end) {
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
+ offset++;
+ }
+
+ min = pivots[offset - 1] + 1;
+next:
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ mas->end = end;
+ mas->offset = offset;
+ mas->index = min;
+ mas->last = max;
+ mas->min = prev_min;
+ mas->max = prev_max;
+ mas->node = last;
+ return (void *)next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+/*
+ * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
+ * @mas: The starting maple state
+ * @mast: The maple_subtree_state, keeps track of 4 maple states.
+ * @count: The estimated count of iterations needed.
+ *
+ * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
+ * is hit. First @b_node is split into two entries which are inserted into the
+ * next iteration of the loop. @b_node is returned populated with the final
+ * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
+ * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
+ * to account for what has been copied into the new sub-tree. The update of
+ * orig_l_mas->last is used in mas_consume to find the slots that will need to
+ * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
+ * the new sub-tree in case the sub-tree becomes the full tree.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static int mas_spanning_rebalance(struct ma_state *mas,
+ struct maple_subtree_state *mast, unsigned char count)
+{
+ unsigned char split, mid_split;
+ unsigned char slot = 0;
+ struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
+ struct maple_enode *old_enode;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->index);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(m_mas, mas->tree, mas->index, mas->index);
+
+ /*
+ * The tree needs to be rebalanced and leaves need to be kept at the same level.
+ * Rebalancing is done by use of the ``struct maple_topiary``.
+ */
+ mast->l = &l_mas;
+ mast->m = &m_mas;
+ mast->r = &r_mas;
+ l_mas.status = r_mas.status = m_mas.status = ma_none;
+
+ /* If this is not the root and the data is insufficient, rebalance with a neighbour. */
+ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+ unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ mast_spanning_rebalance(mast);
+
+ l_mas.depth = 0;
+
+ /*
+ * Each level of the tree is examined and balanced, pushing data to the left or
+ * right, or rebalancing against left or right nodes is employed to avoid
+ * rippling up the tree to limit the amount of churn. Once a new sub-section of
+ * the tree is created, there may be a mix of new and old nodes. The old nodes
+ * will have the incorrect parent pointers and currently be in two trees: the
+ * original tree and the partially new tree. To remedy the parent pointers in
+ * the old tree, the new data is swapped into the active tree and a walk down
+ * the tree is performed and the parent pointers are updated.
+ * See mas_topiary_replace() for more information.
+ */
+ while (count--) {
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mast->orig_l->node);
+ split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
+ &mid_split, mast->orig_l->min);
+ mast_set_split_parents(mast, left, middle, right, split,
+ mid_split);
+ mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+
+ /*
+ * Copy data from next level in the tree to mast->bn from next
+ * iteration
+ */
+ memset(mast->bn, 0, sizeof(struct maple_big_node));
+ mast->bn->type = mte_node_type(left);
+ l_mas.depth++;
+
+ /* Root already stored in l->node. */
+ if (mas_is_root_limits(mast->l))
+ goto new_root;
+
+ mast_ascend(mast);
+ mast_combine_cp_left(mast);
+ l_mas.offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, &l_mas, left);
+ mab_set_b_end(mast->bn, &m_mas, middle);
+ mab_set_b_end(mast->bn, &r_mas, right);
+
+ /* Copy anything necessary out of the right node. */
+ mast_combine_cp_right(mast);
+ mast->orig_l->last = mast->orig_l->max;
+
+ if (mast_sufficient(mast))
+ continue;
+
+ if (mast_overflow(mast))
+ continue;
+
+ /* May be a new root stored in mast->bn */
+ if (mas_is_root_limits(mast->orig_l))
+ break;
+
+ mast_spanning_rebalance(mast);
+
+ /* rebalancing from other nodes may require another loop. */
+ if (!count)
+ count++;
+ }
+
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+ mte_node_type(mast->orig_l->node));
+ l_mas.depth++;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
+ mas_set_parent(mas, left, l_mas.node, slot);
+ if (middle)
+ mas_set_parent(mas, middle, l_mas.node, ++slot);
+
+ if (right)
+ mas_set_parent(mas, right, l_mas.node, ++slot);
+
+ if (mas_is_root_limits(mast->l)) {
+new_root:
+ mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
+ while (!mte_is_root(mast->orig_l->node))
+ mast_ascend(mast);
+ } else {
+ mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
+ }
+
+ old_enode = mast->orig_l->node;
+ mas->depth = l_mas.depth;
+ mas->node = l_mas.node;
+ mas->min = l_mas.min;
+ mas->max = l_mas.max;
+ mas->offset = l_mas.offset;
+ mas_wmb_replace(mas, old_enode);
+ mtree_range_walk(mas);
+ return mast->bn->b_end;
+}
+
+/*
+ * mas_rebalance() - Rebalance a given node.
+ * @mas: The maple state
+ * @b_node: The big maple node.
+ *
+ * Rebalance two nodes into a single node or two new nodes that are sufficient.
+ * Continue upwards until tree is sufficient.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static inline int mas_rebalance(struct ma_state *mas,
+ struct maple_big_node *b_node)
+{
+ char empty_count = mas_mt_height(mas);
+ struct maple_subtree_state mast;
+ unsigned char shift, b_end = ++b_node->b_end;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+
+ trace_ma_op(__func__, mas);
+
+ /*
+ * Rebalancing occurs if a node is insufficient. Data is rebalanced
+ * against the node to the right if it exists, otherwise the node to the
+ * left of this node is rebalanced against this node. If rebalancing
+ * causes just one node to be produced instead of two, then the parent
+ * is also examined and rebalanced if it is insufficient. Every level
+ * tries to combine the data in the same way. If one node contains the
+ * entire range of the tree, then that node is used as a new root node.
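+ * For example, mas_commit_b_node() below takes this path when the new data
+ * would leave a non-root node with fewer than mt_min_slots[] entries in a
+ * tree of height greater than one.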
+ */
+ mas_node_count(mas, empty_count * 2 - 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ mast.bn = b_node;
+ mast.bn->type = mte_node_type(mas->node);
+
+ l_mas = r_mas = *mas;
+
+ if (mas_next_sibling(&r_mas)) {
+ mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
+ r_mas.last = r_mas.index = r_mas.max;
+ } else {
+ mas_prev_sibling(&l_mas);
+ shift = mas_data_end(&l_mas) + 1;
+ mab_shift_right(b_node, shift);
+ mas->offset += shift;
+ mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
+ b_node->b_end = shift + b_end;
+ l_mas.index = l_mas.last = l_mas.min;
+ }
+
+ return mas_spanning_rebalance(mas, &mast, empty_count);
+}
+
+/*
+ * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
+ * state.
+ * @mas: The maple state
+ * @end: The end of the left-most node.
+ *
+ * During a mass-insert event (such as forking), it may be necessary to
+ * rebalance the left-most node when it is not sufficient.
+ */
+static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
+{
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
+ struct maple_enode *eparent, *old_eparent;
+ unsigned char offset, tmp, split = mt_slots[mt] / 2;
+ void __rcu **l_slots, **slots;
+ unsigned long *l_pivs, *pivs, gap;
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+
+ l_mas = *mas;
+ mas_prev_sibling(&l_mas);
+
+ /* set up node. */
+ if (in_rcu) {
+ /* Allocate for both left and right as well as parent. */
+ mas_node_count(mas, 3);
+ if (mas_is_err(mas))
+ return;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ newnode = &reuse;
+ }
+
+ node = mas_mn(mas);
+ newnode->parent = node->parent;
+ slots = ma_slots(newnode, mt);
+ pivs = ma_pivots(newnode, mt);
+ left = mas_mn(&l_mas);
+ l_slots = ma_slots(left, mt);
+ l_pivs = ma_pivots(left, mt);
+ if (!l_slots[split])
+ split++;
+ tmp = mas_data_end(&l_mas) - split;
+
+ memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
+ memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
+ pivs[tmp] = l_mas.max;
+ memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
+ memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
+
+ l_mas.max = l_pivs[split];
+ mas->min = l_mas.max + 1;
+ old_eparent = mt_mk_node(mte_parent(l_mas.node),
+ mas_parent_type(&l_mas, l_mas.node));
+ tmp += end;
+ if (!in_rcu) {
+ unsigned char max_p = mt_pivots[mt];
+ unsigned char max_s = mt_slots[mt];
+
+ if (tmp < max_p)
+ memset(pivs + tmp, 0,
+ sizeof(unsigned long) * (max_p - tmp));
+
+ if (tmp < mt_slots[mt])
+ memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+
+ memcpy(node, newnode, sizeof(struct maple_node));
+ ma_set_meta(node, mt, 0, tmp - 1);
+ mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
+ l_pivs[split]);
+
+ /* Remove data from l_pivs. */
+ tmp = split + 1;
+ memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
+ memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+ ma_set_meta(left, mt, 0, split);
+ eparent = old_eparent;
+
+ goto done;
+ }
+
+ /* RCU requires replacing both l_mas, mas, and parent. */
+ mas->node = mt_mk_node(newnode, mt);
+ ma_set_meta(newnode, mt, 0, tmp);
+
+ new_left = mas_pop_node(mas);
+ new_left->parent = left->parent;
+ mt = mte_node_type(l_mas.node);
+ slots = ma_slots(new_left, mt);
+ pivs = ma_pivots(new_left, mt);
+ memcpy(slots, l_slots, sizeof(void *) * split);
+ memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
+ ma_set_meta(new_left, mt, 0, split);
+ l_mas.node = mt_mk_node(new_left, mt);
+
+ /* replace parent. */
+ offset = mte_parent_slot(mas->node);
+ mt = mas_parent_type(&l_mas, l_mas.node);
+ parent = mas_pop_node(mas);
+ slots = ma_slots(parent, mt);
+ pivs = ma_pivots(parent, mt);
+ memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
+ rcu_assign_pointer(slots[offset], mas->node);
+ rcu_assign_pointer(slots[offset - 1], l_mas.node);
+ pivs[offset - 1] = l_mas.max;
+ eparent = mt_mk_node(parent, mt);
+done:
+ gap = mas_leaf_max_gap(mas);
+ mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
+ gap = mas_leaf_max_gap(&l_mas);
+ mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
+ mas_ascend(mas);
+
+ if (in_rcu) {
+ mas_replace_node(mas, old_eparent);
+ mas_adopt_children(mas, mas->node);
+ }
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mas_split_final_node() - Split the final node in a subtree operation.
+ * @mast: the maple subtree state
+ * @mas: The maple state
+ * @height: The height of the tree in case it's a new root.
+ */
+static inline void mas_split_final_node(struct maple_subtree_state *mast,
+ struct ma_state *mas, int height)
+{
+ struct maple_enode *ancestor;
+
+ if (mte_is_root(mas->node)) {
+ if (mt_is_alloc(mas->tree))
+ mast->bn->type = maple_arange_64;
+ else
+ mast->bn->type = maple_range_64;
+ mas->depth = height;
+ }
+ /*
+ * Only a single node is used here; it could be the root.
+ * The big_node data should just fit in a single node.
+ */
+ ancestor = mas_new_ma_node(mas, mast->bn);
+ mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
+ mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
+ mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
+
+ mast->l->node = ancestor;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
+ mas->offset = mast->bn->b_end - 1;
+}
+
+/*
+ * mast_fill_bnode() - Copy data into the big node in the subtree state
+ * @mast: The maple subtree state
+ * @mas: the maple state
+ * @skip: The number of entries to skip for new node insertion.
+ */
+static inline void mast_fill_bnode(struct maple_subtree_state *mast,
+ struct ma_state *mas,
+ unsigned char skip)
+{
+ bool cp = true;
+ unsigned char split;
+
+ memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
+ memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
+ memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
+ mast->bn->b_end = 0;
+
+ if (mte_is_root(mas->node)) {
+ cp = false;
+ } else {
+ mas_ascend(mas);
+ mas->offset = mte_parent_slot(mas->node);
+ }
+
+ if (cp && mast->l->offset)
+ mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
+
+ split = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->l, mast->l->node);
+ mast->r->offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->r, mast->r->node);
+ if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
+ cp = false;
+
+ if (cp)
+ mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
+ mast->bn, mast->bn->b_end);
+
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mas->node);
+}
+
+/*
+ * mast_split_data() - Split the data in the subtree state big node into regular
+ * nodes.
+ * @mast: The maple subtree state
+ * @mas: The maple state
+ * @split: The location to split the big node
+ */
+static inline void mast_split_data(struct maple_subtree_state *mast,
+ struct ma_state *mas, unsigned char split)
+{
+ unsigned char p_slot;
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, true);
+ mte_set_pivot(mast->r->node, 0, mast->r->max);
+ mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
+ mast->l->offset = mte_parent_slot(mas->node);
+ mast->l->max = mast->bn->pivot[split];
+ mast->r->min = mast->l->max + 1;
+ if (mte_is_leaf(mas->node))
+ return;
+
+ p_slot = mast->orig_l->offset;
+ mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
+ &p_slot, split);
+ mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
+ &p_slot, split);
+}
+
+/*
+ * mas_push_data() - Instead of splitting a node, push the data to the right
+ * or left node if there is room.
+ * @mas: The maple state
+ * @height: The current height of the maple state
+ * @mast: The maple subtree state
+ * @left: Push left or not.
+ *
+ * Keeping the height of the tree low means faster lookups.
+ *
+ * Return: True if pushed, false otherwise.
+ */
+static inline bool mas_push_data(struct ma_state *mas, int height,
+ struct maple_subtree_state *mast, bool left)
+{
+ unsigned char slot_total = mast->bn->b_end;
+ unsigned char end, space, split;
+
+ MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
+ tmp_mas = *mas;
+ tmp_mas.depth = mast->l->depth;
+
+ if (left && !mas_prev_sibling(&tmp_mas))
+ return false;
+ else if (!left && !mas_next_sibling(&tmp_mas))
+ return false;
+
+ end = mas_data_end(&tmp_mas);
+ slot_total += end;
+ space = 2 * mt_slot_count(mas->node) - 2;
+ /* -2 instead of -1 to ensure there isn't a triple split */
+ if (ma_is_leaf(mast->bn->type))
+ space--;
+
+ if (mas->max == ULONG_MAX)
+ space--;
+
+ if (slot_total >= space)
+ return false;
+
+ /* Get the data; Fill mast->bn */
+ mast->bn->b_end++;
+ if (left) {
+ mab_shift_right(mast->bn, end + 1);
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
+ mast->bn->b_end = slot_total + 1;
+ } else {
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
+ }
+
+ /* Configure mast for splitting of mast->bn */
+ split = mt_slots[mast->bn->type] - 2;
+ if (left) {
+ /* Switch mas to prev node */
+ *mas = tmp_mas;
+ /* Start using mast->l for the left side. */
+ tmp_mas.node = mast->l->node;
+ *mast->l = tmp_mas;
+ } else {
+ tmp_mas.node = mast->r->node;
+ *mast->r = tmp_mas;
+ split = slot_total - split;
+ }
+ split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
+ /* Update parent slot for split calculation. */
+ if (left)
+ mast->orig_l->offset += end + 1;
+
+ mast_split_data(mast, mas, split);
+ mast_fill_bnode(mast, mas, 2);
+ mas_split_final_node(mast, mas, height + 1);
+ return true;
+}
+
+/*
+ * mas_split() - Split data that is too big for one node into two.
+ * @mas: The maple state
+ * @b_node: The maple big node
+ * Return: 1 on success, 0 on failure.
+ */
+static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+{
+ struct maple_subtree_state mast;
+ int height = 0;
+ unsigned char mid_split, split = 0;
+ struct maple_enode *old;
+
+ /*
+ * Splitting is handled differently from any other B-tree; the Maple
+ * Tree splits upwards. Splitting up means that the split operation
+ * occurs when the walk of the tree hits the leaves and not on the way
+ * down. The reason for splitting up is that it is impossible to know
+ * how much space will be needed until the leaf is (or leaves are)
+ * reached. Since overwriting data is allowed and a range could
+ * overwrite more than one range or result in changing one entry into 3
+ * entries, it is impossible to know if a split is required until the
+ * data is examined.
+ *
+ * Splitting is a balancing act between keeping allocations to a minimum
+ * and avoiding a 'jitter' event where a tree is expanded to make room
+ * for an entry followed by a contraction when the entry is removed. To
+ * accomplish the balance, there are empty slots remaining in both left
+ * and right nodes after a split.
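+ * For example, splitting a full node roughly in half leaves close to half of
+ * the slots free in each of the two resulting nodes, so nearby inserts and
+ * removals can be absorbed without immediately restructuring again.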
+ */
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
+
+ trace_ma_op(__func__, mas);
+ mas->depth = mas_mt_height(mas);
+ /* Allocation failures will happen early. */
+ mas_node_count(mas, 1 + mas->depth * 2);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.l = &l_mas;
+ mast.r = &r_mas;
+ mast.orig_l = &prev_l_mas;
+ mast.orig_r = &prev_r_mas;
+ mast.bn = b_node;
+
+ while (height++ <= mas->depth) {
+ if (mt_slots[b_node->type] > b_node->b_end) {
+ mas_split_final_node(&mast, mas, height);
+ break;
+ }
+
+ l_mas = r_mas = *mas;
+ l_mas.node = mas_new_ma_node(mas, b_node);
+ r_mas.node = mas_new_ma_node(mas, b_node);
+ /*
+ * Another way that 'jitter' is avoided is to terminate a split up early if the
+ * left or right node has space to spare. This is referred to as "pushing left"
+ * or "pushing right" and is similar to the B* tree, except the nodes left or
+ * right can rarely be reused due to RCU, but the ripple upwards is halted which
+ * is a significant savings.
+ */
+ /* Try to push left. */
+ if (mas_push_data(mas, height, &mast, true))
+ break;
+ /* Try to push right. */
+ if (mas_push_data(mas, height, &mast, false))
+ break;
+
+ split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ mast_split_data(&mast, mas, split);
+ /*
+ * r->max is usually already correct, but mab_mas_cp() in the call
+ * above can overwrite it, so restore it here.
+ */
+ mast.r->max = mas->max;
+ mast_fill_bnode(&mast, mas, 1);
+ prev_l_mas = *mast.l;
+ prev_r_mas = *mast.r;
+ }
+
+ /* Set the original node as dead */
+ old = mas->node;
+ mas->node = l_mas.node;
+ mas_wmb_replace(mas, old);
+ mtree_range_walk(mas);
+ return 1;
+}
+
+/*
+ * mas_reuse_node() - Reuse the node to store the data.
+ * @wr_mas: The maple write state
+ * @bn: The maple big node
+ * @end: The end of the data.
+ *
+ * Will always return false in RCU mode.
+ *
+ * Return: True if node was reused, false otherwise.
+ */
+static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *bn, unsigned char end)
+{
+ /* Need to be rcu safe. */
+ if (mt_in_rcu(wr_mas->mas->tree))
+ return false;
+
+ if (end > bn->b_end) {
+ int clear = mt_slots[wr_mas->type] - bn->b_end;
+
+ memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
+ memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
+ }
+ mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
+ return true;
+}
+
+/*
+ * mas_commit_b_node() - Commit the big node into the tree.
+ * @wr_mas: The maple write state
+ * @b_node: The maple big node
+ * @end: The end of the data.
+ */
+static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char end)
+{
+ struct maple_node *node;
+ struct maple_enode *old_enode;
+ unsigned char b_end = b_node->b_end;
+ enum maple_type b_type = b_node->type;
+
+ old_enode = wr_mas->mas->node;
+ if ((b_end < mt_min_slots[b_type]) &&
+ (!mte_is_root(old_enode)) &&
+ (mas_mt_height(wr_mas->mas) > 1))
+ return mas_rebalance(wr_mas->mas, b_node);
+
+ if (b_end >= mt_slots[b_type])
+ return mas_split(wr_mas->mas, b_node);
+
+ if (mas_reuse_node(wr_mas, b_node, end))
+ goto reuse_node;
+
+ mas_node_count(wr_mas->mas, 1);
+ if (mas_is_err(wr_mas->mas))
+ return 0;
+
+ node = mas_pop_node(wr_mas->mas);
+ node->parent = mas_mn(wr_mas->mas)->parent;
+ wr_mas->mas->node = mt_mk_node(node, b_type);
+ mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
+ mas_replace_node(wr_mas->mas, old_enode);
+reuse_node:
+ mas_update_gap(wr_mas->mas);
+ wr_mas->mas->end = b_end;
+ return 1;
+}
+
+/*
+ * mas_root_expand() - Expand a root to a node
+ * @mas: The maple state
+ * @entry: The entry to store into the tree
+ */
+static inline int mas_root_expand(struct ma_state *mas, void *entry)
+{
+ void *contents = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+ int slot = 0;
+
+ mas_node_count(mas, 1);
+ if (unlikely(mas_is_err(mas)))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
+
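+ /*
+ * A store that does not start at index 0 preserves any existing
+ * root entry at index 0 and closes the preceding range at
+ * mas->index - 1 before the new entry is placed.
+ */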
+ if (mas->index) {
+ if (contents) {
+ rcu_assign_pointer(slots[slot], contents);
+ if (likely(mas->index > 1))
+ slot++;
+ }
+ pivots[slot++] = mas->index - 1;
+ }
+
+ rcu_assign_pointer(slots[slot], entry);
+ mas->offset = slot;
+ pivots[slot] = mas->last;
+ if (mas->last != ULONG_MAX)
+ pivots[++slot] = ULONG_MAX;
+
+ mas->depth = 1;
+ mas_set_height(mas);
+ ma_set_meta(node, maple_leaf_64, 0, slot);
+ /* swap the new root into the tree */
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ return slot;
+}
+
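+/*
+ * mas_store_root() - Store an entry while the tree is only a root pointer.
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * A store to a range other than [0, 0], or of a value whose low two bits are
+ * 0b10 (which could otherwise be mistaken for an encoded node), expands the
+ * root into a real node; otherwise the entry is assigned directly to ma_root.
+ */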
+static inline void mas_store_root(struct ma_state *mas, void *entry)
+{
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ mas_root_expand(mas, entry);
+ else if (((unsigned long) (entry) & 3) == 2)
+ mas_root_expand(mas, entry);
+ else {
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->status = ma_start;
+ }
+}
+
+/*
+ * mas_is_span_wr() - Check if the write needs to be treated as a write that
+ * spans the node.
+ * @wr_mas: The maple write state
+ *
+ * Spanning writes are writes that start in one node and end in another OR if
+ * the write of a %NULL will cause the node to end with a %NULL.
+ *
+ * Return: True if this is a spanning write, false otherwise.
+ */
+static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
+{
+ unsigned long max = wr_mas->r_max;
+ unsigned long last = wr_mas->mas->last;
+ enum maple_type type = wr_mas->type;
+ void *entry = wr_mas->entry;
+
+ /* Contained in this pivot, fast path */
+ if (last < max)
+ return false;
+
+ if (ma_is_leaf(type)) {
+ max = wr_mas->mas->max;
+ if (last < max)
+ return false;
+ }
+
+ if (last == max) {
+ /*
+ * The last entry of leaf node cannot be NULL unless it is the
+ * rightmost node (writing ULONG_MAX), otherwise it spans slots.
+ */
+ if (entry || last == ULONG_MAX)
+ return false;
+ }
+
+ trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
+ return true;
+}
+
+static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
+{
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ mas_wr_node_walk(wr_mas);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+}
+
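+/*
+ * mas_wr_walk_traverse() - Step the write state down into the child node read
+ * by the last descend (wr_mas->content), narrowing the maple state limits and
+ * resetting the offset.
+ * @wr_mas: The maple write state
+ */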
+static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->max = wr_mas->r_max;
+ wr_mas->mas->min = wr_mas->r_min;
+ wr_mas->mas->node = wr_mas->content;
+ wr_mas->mas->offset = 0;
+ wr_mas->mas->depth++;
+}
+/*
+ * mas_wr_walk() - Walk the tree for a write.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ *
+ * Return: True if it's contained in a node, false on spanning write.
+ */
+static bool mas_wr_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ if (unlikely(mas_is_span_wr(wr_mas)))
+ return false;
+
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+
+ mas_wr_walk_traverse(wr_mas);
+ }
+
+ return true;
+}
+
+static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+ mas_wr_walk_traverse(wr_mas);
+ }
+ return true;
+}
+/*
+ * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
+ * @l_wr_mas: The left maple write state
+ * @r_wr_mas: The right maple write state
+ */
+static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
+ struct ma_wr_state *r_wr_mas)
+{
+ struct ma_state *r_mas = r_wr_mas->mas;
+ struct ma_state *l_mas = l_wr_mas->mas;
+ unsigned char l_slot;
+
+ l_slot = l_mas->offset;
+ if (!l_wr_mas->content)
+ l_mas->index = l_wr_mas->r_min;
+
+ if ((l_mas->index == l_wr_mas->r_min) &&
+ (l_slot &&
+ !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
+ if (l_slot > 1)
+ l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
+ else
+ l_mas->index = l_mas->min;
+
+ l_mas->offset = l_slot - 1;
+ }
+
+ if (!r_wr_mas->content) {
+ if (r_mas->last < r_wr_mas->r_max)
+ r_mas->last = r_wr_mas->r_max;
+ r_mas->offset++;
+ } else if ((r_mas->last == r_wr_mas->r_max) &&
+ (r_mas->last < r_mas->max) &&
+ !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
+ r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
+ r_wr_mas->type, r_mas->offset + 1);
+ r_mas->offset++;
+ }
+}
+
+static inline void *mas_state_walk(struct ma_state *mas)
+{
+ void *entry;
+
+ entry = mas_start(mas);
+ if (mas_is_none(mas))
+ return NULL;
+
+ if (mas_is_ptr(mas))
+ return entry;
+
+ return mtree_range_walk(mas);
+}
+
+/*
+ * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
+ * to date.
+ *
+ * @mas: The maple state.
+ *
+ * Note: Leaves mas in an undesirable state.
+ * Return: The entry for @mas->index or %NULL on dead node.
+ */
+static inline void *mtree_lookup_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+
+ next = mas->node;
+ do {
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = mt_pivots[type];
+ offset = 0;
+ do {
+ if (pivots[offset] >= mas->index)
+ break;
+ } while (++offset < end);
+
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ return (void *)next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
+/*
+ * mas_new_root() - Create a new root node that only contains the entry passed
+ * in.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ *
+ * Only valid when the index == 0 and the last == ULONG_MAX
+ *
+ * Return: 0 on error, 1 on success.
+ */
+static inline int mas_new_root(struct ma_state *mas, void *entry)
+{
+ struct maple_enode *root = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+
+ if (!entry && !mas->index && mas->last == ULONG_MAX) {
+ mas->depth = 0;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->status = ma_start;
+ goto done;
+ }
+
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
+ rcu_assign_pointer(slots[0], entry);
+ pivots[0] = mas->last;
+ mas->depth = 1;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+
+done:
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mas->tree);
+
+ return 1;
+}
+
+/*
+ * mas_wr_spanning_store() - Create a subtree with the store operation completed
+ * and new nodes where necessary, then place the sub-tree in the actual tree.
+ * Note that mas is expected to point to the node which caused the store to
+ * span.
+ * @wr_mas: The maple write state
+ *
+ * Return: 0 on error, positive on success.
+ */
+static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+{
+ struct maple_subtree_state mast;
+ struct maple_big_node b_node;
+ struct ma_state *mas;
+ unsigned char height;
+
+ /* Left and Right side of spanning store */
+ MA_STATE(l_mas, NULL, 0, 0);
+ MA_STATE(r_mas, NULL, 0, 0);
+ MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
+ MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
+
+ /*
+ * A store operation that spans multiple nodes is called a spanning
+ * store and is handled early in the store call stack by the function
+ * mas_is_span_wr(). When a spanning store is identified, the maple
+ * state is duplicated. The first maple state walks the left tree path
+ * to ``index``, the duplicate walks the right tree path to ``last``.
+ * The data in the two nodes are combined into a single node, two nodes,
+ * or possibly three nodes (see the 3-way split above). A ``NULL``
+ * written to the last entry of a node is considered a spanning store as
+ * a rebalance is required for the operation to complete and an overflow
+ * of data may happen.
+ */
+ mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ return mas_new_root(mas, wr_mas->entry);
+ /*
+ * Node rebalancing may occur due to this store, so there may be three new
+ * entries per level plus a new root.
+ */
+ height = mas_mt_height(mas);
+ mas_node_count(mas, 1 + height * 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ /*
+ * Set up right side. Need to get to the next offset after the spanning
+ * store to ensure it's not NULL and to combine both the next node and
+ * the node with the start together.
+ */
+ r_mas = *mas;
+ /* Avoid overflow, walk to next slot in the tree. */
+ if (r_mas.last + 1)
+ r_mas.last++;
+
+ r_mas.index = r_mas.last;
+ mas_wr_walk_index(&r_wr_mas);
+ r_mas.last = r_mas.index = mas->last;
+
+ /* Set up left side. */
+ l_mas = *mas;
+ mas_wr_walk_index(&l_wr_mas);
+
+ if (!wr_mas->entry) {
+ mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
+ mas->offset = l_mas.offset;
+ mas->index = l_mas.index;
+ mas->last = l_mas.last = r_mas.last;
+ }
+
+ /* expanding NULLs may make this cover the entire range */
+ if (!l_mas.index && r_mas.last == ULONG_MAX) {
+ mas_set_range(mas, 0, ULONG_MAX);
+ return mas_new_root(mas, wr_mas->entry);
+ }
+
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ /* Copy l_mas and store the value in b_node. */
+ mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
+ /* Copy r_mas into b_node. */
+ if (r_mas.offset <= r_mas.end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
+ &b_node, b_node.b_end + 1);
+ else
+ b_node.b_end++;
+
+ /* Stop spanning searches by searching for just index. */
+ l_mas.index = l_mas.last = mas->index;
+
+ mast.bn = &b_node;
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ /* Combine l_mas and r_mas and split them up evenly again. */
+ return mas_spanning_rebalance(mas, &mast, height + 1);
+}
+
+/*
+ * mas_wr_node_store() - Attempt to store the value in a node
+ * @wr_mas: The maple write state
+ *
+ * Attempts to reuse the node, but may allocate.
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
+{
+ struct ma_state *mas = wr_mas->mas;
+ void __rcu **dst_slots;
+ unsigned long *dst_pivots;
+ unsigned char dst_offset, offset_end = wr_mas->offset_end;
+ struct maple_node reuse, *newnode;
+ unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ /* Check that the node will keep enough data; the caller already verified there is room. */
+ if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
+ !(mas->mas_flags & MA_STATE_BULK))
+ return false;
+
+ if (mas->last == wr_mas->end_piv)
+ offset_end++; /* don't copy this offset */
+ else if (unlikely(wr_mas->r_max == ULONG_MAX))
+ mas_bulk_rebalance(mas, mas->end, wr_mas->type);
+
+ /* set up node. */
+ if (in_rcu) {
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return false;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ memset(&reuse, 0, sizeof(struct maple_node));
+ newnode = &reuse;
+ }
+
+ newnode->parent = mas_mn(mas)->parent;
+ dst_pivots = ma_pivots(newnode, wr_mas->type);
+ dst_slots = ma_slots(newnode, wr_mas->type);
+ /* Copy from start to insert point */
+ memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
+ memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
+
+ /* Handle insert of new range starting after old range */
+ if (wr_mas->r_min < mas->index) {
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
+ dst_pivots[mas->offset++] = mas->index - 1;
+ }
+
+ /* Store the new entry and range end. */
+ if (mas->offset < node_pivots)
+ dst_pivots[mas->offset] = mas->last;
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
+
+ /*
+ * This range wrote to the end of the node, or it overwrote the rest of
+ * the data.
+ */
+ if (offset_end > mas->end)
+ goto done;
+
+ dst_offset = mas->offset + 1;
+ /* Copy to the end of node if necessary. */
+ copy_size = mas->end - offset_end + 1;
+ memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
+ sizeof(void *) * copy_size);
+ memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
+ sizeof(unsigned long) * (copy_size - 1));
+
+ if (new_end < node_pivots)
+ dst_pivots[new_end] = mas->max;
+
+done:
+ mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
+ if (in_rcu) {
+ struct maple_enode *old_enode = mas->node;
+
+ mas->node = mt_mk_node(newnode, wr_mas->type);
+ mas_replace_node(mas, old_enode);
+ } else {
+ memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
+ }
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ mas->end = new_end;
+ return true;
+}
+
+/*
+ * mas_wr_slot_store() - Attempt to store a value in a slot.
+ * @wr_mas: the maple write state
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char offset = mas->offset;
+ void __rcu **slots = wr_mas->slots;
+ bool gap = false;
+
+ gap |= !mt_slot_locked(mas->tree, slots, offset);
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
+
+ if (wr_mas->offset_end - offset == 1) {
+ if (mas->index == wr_mas->r_min) {
+ /* Overwriting the range and a part of the next one */
+ rcu_assign_pointer(slots[offset], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->last;
+ } else {
+ /* Overwriting a part of the range and the next one */
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ mas->offset++; /* Keep mas accurate. */
+ }
+ } else if (!mt_in_rcu(mas->tree)) {
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ wr_mas->pivots[offset + 1] = mas->last;
+ mas->offset++; /* Keep mas accurate. */
+ } else {
+ return false;
+ }
+
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ /*
+ * Only update gap when the new entry is empty or there is an empty
+ * entry in the original two ranges.
+ */
+ if (!wr_mas->entry || gap)
+ mas_update_gap(mas);
+
+ return true;
+}
+
+static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (!wr_mas->slots[wr_mas->offset_end]) {
+ /* If this one is null, the next and prev are not */
+ mas->last = wr_mas->end_piv;
+ } else {
+ /* Check next slot(s) if we are overwriting the end */
+ if ((mas->last == wr_mas->end_piv) &&
+ (mas->end != wr_mas->offset_end) &&
+ !wr_mas->slots[wr_mas->offset_end + 1]) {
+ wr_mas->offset_end++;
+ if (wr_mas->offset_end == mas->end)
+ mas->last = mas->max;
+ else
+ mas->last = wr_mas->pivots[wr_mas->offset_end];
+ wr_mas->end_piv = mas->last;
+ }
+ }
+
+ if (!wr_mas->content) {
+ /* If this one is null, the next and prev are not */
+ mas->index = wr_mas->r_min;
+ } else {
+ /* Check prev slot if we are overwriting the start */
+ if (mas->index == wr_mas->r_min && mas->offset &&
+ !wr_mas->slots[mas->offset - 1]) {
+ mas->offset--;
+ wr_mas->r_min = mas->index =
+ mas_safe_min(mas, wr_mas->pivots, mas->offset);
+ wr_mas->r_max = wr_mas->pivots[mas->offset];
+ }
+ }
+}
+
+static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
+{
+ while ((wr_mas->offset_end < wr_mas->mas->end) &&
+ (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
+ wr_mas->offset_end++;
+
+ if (wr_mas->offset_end < wr_mas->mas->end)
+ wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
+ else
+ wr_mas->end_piv = wr_mas->mas->max;
+
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+}
+
+static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end = mas->end + 2;
+
+ new_end -= wr_mas->offset_end - mas->offset;
+ if (wr_mas->r_min == mas->index)
+ new_end--;
+
+ if (wr_mas->end_piv == mas->last)
+ new_end--;
+
+ return new_end;
+}
+
+/*
+ * mas_wr_append() - Attempt to append
+ * @wr_mas: the maple write state
+ * @new_end: The end of the node after the modification
+ *
+ * This is currently unsafe in rcu mode since the end of the node may be cached
+ * by readers while the node contents may be updated which could result in
+ * inaccurate information.
+ *
+ * Return: True if appended, false otherwise
+ */
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
+{
+ struct ma_state *mas;
+ void __rcu **slots;
+ unsigned char end;
+
+ mas = wr_mas->mas;
+ if (mt_in_rcu(mas->tree))
+ return false;
+
+ end = mas->end;
+ if (mas->offset != end)
+ return false;
+
+ if (new_end < mt_pivots[wr_mas->type]) {
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
+ ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
+ }
+
+ slots = wr_mas->slots;
+ if (new_end == end + 1) {
+ if (mas->last == wr_mas->r_max) {
+ /* Append to end of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = new_end;
+ } else {
+ /* Append to start of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end] = mas->last;
+ rcu_assign_pointer(slots[end], wr_mas->entry);
+ }
+ } else {
+ /* Append to the range without touching any boundaries. */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end + 1] = mas->last;
+ rcu_assign_pointer(slots[end + 1], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = end + 1;
+ }
+
+ if (!wr_mas->content || !wr_mas->entry)
+ mas_update_gap(mas);
+
+ mas->end = new_end;
+ trace_ma_write(__func__, mas, new_end, wr_mas->entry);
+ return true;
+}
+
+/*
+ * mas_wr_bnode() - Slow path for a modification.
+ * @wr_mas: The write maple state
+ *
+ * This is where splits and rebalances end up.
+ */
+static void mas_wr_bnode(struct ma_wr_state *wr_mas)
+{
+ struct maple_big_node b_node;
+
+ trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
+}
+
+static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end;
+
+ /* Direct replacement */
+ if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+ if (!!wr_mas->entry ^ !!wr_mas->content)
+ mas_update_gap(mas);
+ return;
+ }
+
+ /*
+ * If new_end exceeds the size of the maple node, it cannot take the
+ * fast path.
+ */
+ new_end = mas_wr_new_end(wr_mas);
+ if (new_end >= mt_slots[wr_mas->type])
+ goto slow_path;
+
+ /* Attempt to append */
+ if (mas_wr_append(wr_mas, new_end))
+ return;
+
+ if (new_end == mas->end && mas_wr_slot_store(wr_mas))
+ return;
+
+ if (mas_wr_node_store(wr_mas, new_end))
+ return;
+
+ if (mas_is_err(mas))
+ return;
+
+slow_path:
+ mas_wr_bnode(wr_mas);
+}
+
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ *
+ * Return: The contents that were previously stored at the index.
+ */
+static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ wr_mas->content = mas_start(mas);
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ if (unlikely(!mas_wr_walk(wr_mas))) {
+ mas_wr_spanning_store(wr_mas);
+ return wr_mas->content;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ mas_wr_end_piv(wr_mas);
+ /* New root for a single pointer */
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas_new_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ mas_wr_modify(wr_mas);
+ return wr_mas->content;
+}
+
+/**
+ * mas_insert() - Internal call to insert a value
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * Return: %NULL or, if something already exists at the requested index, the
+ * existing contents. The maple state needs to be checked for error conditions.
+ */
+static inline void *mas_insert(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ /*
+ * Inserting a new range inserts either 0, 1, or 2 pivots within the
+ * tree. If the insert fits exactly into an existing gap with a value
+ * of NULL, then the slot only needs to be written with the new value.
+ * If the range being inserted is adjacent to another range, then only a
+ * single pivot needs to be inserted (as well as writing the entry). If
+ * the new range is within a gap but does not touch any other ranges,
+ * then two pivots need to be inserted: the start - 1, and the end. As
+ * usual, the entry must be written. Most operations require a new node
+ * to be allocated and replace an existing node to ensure RCU safety,
+ * when in RCU mode. The exception to requiring a newly allocated node
+ * is when inserting at the end of a node (appending). When done
+ * carefully, appending can reuse the node in place.
+ */
+ wr_mas.content = mas_start(mas);
+ if (wr_mas.content)
+ goto exists;
+
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, entry);
+ return NULL;
+ }
+
+ /* spanning writes always overwrite something */
+ if (!mas_wr_walk(&wr_mas))
+ goto exists;
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+
+ if (!entry)
+ return NULL;
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return wr_mas.content;
+}
+
+static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+{
+retry:
+ mas_set(mas, index);
+ mas_state_walk(mas);
+ if (mas_is_start(mas))
+ goto retry;
+}
+
+static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
+ struct maple_node *node, const unsigned long index)
+{
+ if (unlikely(ma_dead_node(node))) {
+ mas_rewalk(mas, index);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * mas_prev_node() - Find the previous non-null entry at the same level in the
+ * tree.
+ * @mas: The maple state
+ * @min: The lower limit to search
+ *
+ * The prev node value will be mas->node[mas->offset] or the status will be
+ * ma_none.
+ * Return: 1 if the node is dead, 0 otherwise.
+ */
+static int mas_prev_node(struct ma_state *mas, unsigned long min)
+{
+ enum maple_type mt;
+ int offset, level;
+ void __rcu **slots;
+ struct maple_node *node;
+ unsigned long *pivots;
+ unsigned long max;
+
+ node = mas_mn(mas);
+ if (!mas->min)
+ goto no_entry;
+
+ max = mas->min - 1;
+ if (max < min)
+ goto no_entry;
+
+ level = 0;
+ do {
+ if (ma_is_root(node))
+ goto no_entry;
+
+ /* Walk up. */
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+ offset = mas->offset;
+ level++;
+ node = mas_mn(mas);
+ } while (!offset);
+
+ offset--;
+ mt = mte_node_type(mas->node);
+ while (level > 1) {
+ level--;
+ slots = ma_slots(node, mt);
+ mas->node = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ pivots = ma_pivots(node, mt);
+ offset = ma_data_end(node, mt, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+ }
+
+ slots = ma_slots(node, mt);
+ mas->node = mas_slot(mas, slots, offset);
+ pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ if (likely(offset))
+ mas->min = pivots[offset - 1] + 1;
+ mas->max = max;
+ mas->offset = mas_data_end(mas);
+ if (unlikely(mte_dead_node(mas->node)))
+ return 1;
+
+ mas->end = mas->offset;
+ return 0;
+
+no_entry:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->status = ma_underflow;
+ return 0;
+}
+
+/*
+ * mas_prev_slot() - Get the entry in the previous slot
+ *
+ * @mas: The maple state
+ * @min: The minimum starting range
+ * @empty: Can be empty
+ *
+ * Return: The entry in the previous slot which is possibly NULL
+ */
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
+{
+ void *entry;
+ void __rcu **slots;
+ unsigned long pivot;
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ unsigned long save_point = mas->index;
+
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (mas->min <= min) {
+ pivot = mas_safe_min(mas, pivots, mas->offset);
+
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (pivot <= min)
+ goto underflow;
+ }
+
+again:
+ if (likely(mas->offset)) {
+ mas->offset--;
+ mas->last = mas->index - 1;
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ } else {
+ if (mas->index <= min)
+ goto underflow;
+
+ if (mas_prev_node(mas, min)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
+
+ if (WARN_ON_ONCE(mas_is_underflow(mas)))
+ return NULL;
+
+ mas->last = mas->max;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->index = pivots[mas->offset - 1] + 1;
+ }
+
+ slots = ma_slots(node, type);
+ entry = mas_slot(mas, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (likely(entry))
+ return entry;
+
+ if (!empty) {
+ if (mas->index <= min) {
+ mas->status = ma_underflow;
+ return NULL;
+ }
+
+ goto again;
+ }
+
+ return entry;
+
+underflow:
+ mas->status = ma_underflow;
+ return NULL;
+}
+
+/*
+ * mas_next_node() - Get the next node at the same level in the tree.
+ * @mas: The maple state
+ * @node: The maple node
+ * @max: The maximum pivot value to check.
+ *
+ * The next value will be mas->node[mas->offset] or the status will have
+ * overflowed.
+ * Return: 1 on dead node, 0 otherwise.
+ */
+static int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
+{
+ unsigned long min;
+ unsigned long *pivots;
+ struct maple_enode *enode;
+ struct maple_node *tmp;
+ int level = 0;
+ unsigned char node_end;
+ enum maple_type mt;
+ void __rcu **slots;
+
+ if (mas->max >= max)
+ goto overflow;
+
+ min = mas->max + 1;
+ level = 0;
+ do {
+ if (ma_is_root(node))
+ goto overflow;
+
+ /* Walk up. */
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+
+ level++;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ node_end = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ } while (unlikely(mas->offset == node_end));
+
+ slots = ma_slots(node, mt);
+ mas->offset++;
+ enode = mas_slot(mas, slots, mas->offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ if (level > 1)
+ mas->offset = 0;
+
+ while (unlikely(level > 1)) {
+ level--;
+ mas->node = enode;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ slots = ma_slots(node, mt);
+ enode = mas_slot(mas, slots, 0);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+ }
+
+ if (!mas->offset)
+ pivots = ma_pivots(node, mt);
+
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
+ tmp = mte_to_node(enode);
+ mt = mte_node_type(enode);
+ pivots = ma_pivots(tmp, mt);
+ mas->end = ma_data_end(tmp, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = enode;
+ mas->min = min;
+ return 0;
+
+overflow:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->status = ma_overflow;
+ return 0;
+}
+
+/*
+ * mas_next_slot() - Get the entry in the next slot
+ *
+ * @mas: The maple state
+ * @max: The maximum starting range
+ * @empty: Can be empty
+ *
+ * Return: The entry in the next slot which is possibly NULL
+ */
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
+{
+ void __rcu **slots;
+ unsigned long *pivots;
+ unsigned long pivot;
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned long save_point = mas->last;
+ void *entry;
+
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (mas->max >= max) {
+ if (likely(mas->offset < mas->end))
+ pivot = pivots[mas->offset];
+ else
+ pivot = mas->max;
+
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (pivot >= max) { /* Was at the limit, next will extend beyond */
+ mas->status = ma_overflow;
+ return NULL;
+ }
+ }
+
+ if (likely(mas->offset < mas->end)) {
+ mas->index = pivots[mas->offset] + 1;
+again:
+ mas->offset++;
+ if (likely(mas->offset < mas->end))
+ mas->last = pivots[mas->offset];
+ else
+ mas->last = mas->max;
+ } else {
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
+ if (mas_next_node(mas, node, max)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
+
+ if (WARN_ON_ONCE(mas_is_overflow(mas)))
+ return NULL;
+
+ mas->offset = 0;
+ mas->index = mas->min;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->last = pivots[0];
+ }
+
+ slots = ma_slots(node, type);
+ entry = mt_slot(mas->tree, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (entry)
+ return entry;
+
+ if (!empty) {
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
+ mas->index = mas->last + 1;
+ goto again;
+ }
+
+ return entry;
+}
+
+/*
+ * mas_next_entry() - Internal function to get the next entry.
+ * @mas: The maple state
+ * @limit: The maximum range start.
+ *
+ * Set the @mas->node to the next entry and the range_start to
+ * the beginning value for the entry. Does not check beyond @limit.
+ * Sets @mas->index and @mas->last to the range. Does not update @mas->index
+ * and @mas->last on overflow.
+ * Restarts on dead nodes.
+ *
+ * Return: the next entry or %NULL.
+ */
+static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+{
+ if (mas->last >= limit) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
+ return mas_next_slot(mas, limit, false);
+}
+
+/*
+ * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
+ * highest gap address of a given size in a given node and descend.
+ * @mas: The maple state
+ * @size: The needed size.
+ * @gap_min: Pointer to store the lowest address of the gap found
+ * @gap_max: Pointer to store the highest address of the gap found
+ *
+ * Return: True if found in a leaf, false otherwise.
+ */
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
+ unsigned long *gap_min, unsigned long *gap_max)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ struct maple_node *node = mas_mn(mas);
+ unsigned long *pivots, *gaps;
+ void __rcu **slots;
+ unsigned long gap = 0;
+ unsigned long max, min;
+ unsigned char offset;
+
+ if (unlikely(mas_is_err(mas)))
+ return true;
+
+ if (ma_is_dense(type)) {
+ /* dense nodes. */
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+ /* Skip out of bounds. */
+ while (mas->last < min)
+ min = mas_safe_min(mas, pivots, --offset);
+
+ max = mas_safe_pivot(mas, pivots, offset, type);
+ while (mas->index <= max) {
+ gap = 0;
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = max - min + 1;
+
+ if (gap) {
+ if ((size <= gap) && (size <= mas->last - min + 1))
+ break;
+
+ if (!gaps) {
+ /* Skip the next slot, it cannot be a gap. */
+ if (offset < 2)
+ goto ascend;
+
+ offset -= 2;
+ max = pivots[offset];
+ min = mas_safe_min(mas, pivots, offset);
+ continue;
+ }
+ }
+
+ if (!offset)
+ goto ascend;
+
+ offset--;
+ max = min - 1;
+ min = mas_safe_min(mas, pivots, offset);
+ }
+
+ if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+ goto no_space;
+
+ if (unlikely(ma_is_leaf(type))) {
+ mas->offset = offset;
+ *gap_min = min;
+ *gap_max = min + gap - 1;
+ return true;
+ }
+
+ /* descend, only happens under lock. */
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = max;
+ mas->offset = mas_data_end(mas);
+ return false;
+
+ascend:
+ if (!mte_is_root(mas->node))
+ return false;
+
+no_space:
+ mas_set_err(mas, -EBUSY);
+ return false;
+}
+
+static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ unsigned long pivot, min, gap = 0;
+ unsigned char offset, data_end;
+ unsigned long *gaps, *pivots;
+ void __rcu **slots;
+ struct maple_node *node;
+ bool found = false;
+
+ if (ma_is_dense(type)) {
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ node = mas_mn(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+ data_end = ma_data_end(node, type, pivots, mas->max);
+ for (; offset <= data_end; offset++) {
+ pivot = mas_safe_pivot(mas, pivots, offset, type);
+
+ /* Not within lower bounds */
+ if (mas->index > pivot)
+ goto next_slot;
+
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = min(pivot, mas->last) - max(mas->index, min) + 1;
+ else
+ goto next_slot;
+
+ if (gap >= size) {
+ if (ma_is_leaf(type)) {
+ found = true;
+ goto done;
+ }
+ if (mas->index <= pivot) {
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ break;
+ }
+ }
+next_slot:
+ min = pivot + 1;
+ if (mas->last <= pivot) {
+ mas_set_err(mas, -EBUSY);
+ return true;
+ }
+ }
+
+ if (mte_is_root(mas->node))
+ found = true;
+done:
+ mas->offset = offset;
+ return found;
+}
+
+/**
+ * mas_walk() - Search for @mas->index in the tree.
+ * @mas: The maple state.
+ *
+ * mas->index and mas->last will be set to the range if there is a value. If
+ * mas->status is ma_none, it is reset to ma_start.
+ *
+ * Return: the entry at the location or %NULL.
+ */
+void *mas_walk(struct ma_state *mas)
+{
+ void *entry;
+
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->status = ma_start;
+retry:
+ entry = mas_state_walk(mas);
+ if (mas_is_start(mas)) {
+ goto retry;
+ } else if (mas_is_none(mas)) {
+ mas->index = 0;
+ mas->last = ULONG_MAX;
+ } else if (mas_is_ptr(mas)) {
+ if (!mas->index) {
+ mas->last = 0;
+ return entry;
+ }
+
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ mas->status = ma_none;
+ return NULL;
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_walk);
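+
+/*
+ * Illustrative usage sketch (not part of the tree implementation; the tree and
+ * index names below are hypothetical): look up a single index and read back
+ * the range it belongs to.
+ *
+ *   MA_STATE(mas, &tree, index, index);
+ *   void *entry;
+ *
+ *   rcu_read_lock();
+ *   entry = mas_walk(&mas);
+ *   rcu_read_unlock();
+ *
+ * After the call, mas.index and mas.last span the range covering the
+ * looked-up index.
+ */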
+
+static inline bool mas_rewind_node(struct ma_state *mas)
+{
+ unsigned char slot;
+
+ do {
+ if (mte_is_root(mas->node)) {
+ slot = mas->offset;
+ if (!slot)
+ return false;
+ } else {
+ mas_ascend(mas);
+ slot = mas->offset;
+ }
+ } while (!slot);
+
+ mas->offset = --slot;
+ return true;
+}
+
+/*
+ * mas_skip_node() - Internal function. Skip over a node.
+ * @mas: The maple state.
+ *
+ * Return: true if there is another node, false otherwise.
+ */
+static inline bool mas_skip_node(struct ma_state *mas)
+{
+ if (mas_is_err(mas))
+ return false;
+
+ do {
+ if (mte_is_root(mas->node)) {
+ if (mas->offset >= mas_data_end(mas)) {
+ mas_set_err(mas, -EBUSY);
+ return false;
+ }
+ } else {
+ mas_ascend(mas);
+ }
+ } while (mas->offset >= mas_data_end(mas));
+
+ mas->offset++;
+ return true;
+}
+
+/*
+ * mas_awalk() - Allocation walk. Search from low address to high for a gap of
+ * @size.
+ * @mas: The maple state
+ * @size: The size of the gap required
+ *
+ * Search between @mas->index and @mas->last for a gap of @size.
+ */
+static inline void mas_awalk(struct ma_state *mas, unsigned long size)
+{
+ struct maple_enode *last = NULL;
+
+ /*
+ * There are 4 options:
+ * go to child (descend)
+ * go back to parent (ascend)
+ * no gap found. (return, slot == MAPLE_NODE_SLOTS)
+ * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ */
+ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
+ if (last == mas->node)
+ mas_skip_node(mas);
+ else
+ last = mas->node;
+ }
+}
+
+/*
+ * mas_sparse_area() - Internal function. Return upper or lower limit when
+ * searching for a gap in an empty tree.
+ * @mas: The maple state
+ * @min: the minimum range
+ * @max: The maximum range
+ * @size: The size of the gap
+ * @fwd: Searching forward or back
+ */
+static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size, bool fwd)
+{
+ if (!unlikely(mas_is_none(mas)) && min == 0) {
+ min++;
+ /*
+ * min was just increased, so recheck whether the requested size is
+ * still satisfied.
+ */
+ if (min > max || max - min + 1 < size)
+ return -EBUSY;
+ }
+ /* mas_is_ptr */
+
+ if (fwd) {
+ mas->index = min;
+ mas->last = min + size - 1;
+ } else {
+ mas->last = max;
+ mas->index = max - size + 1;
+ }
+ return 0;
+}
+
+/*
+ * mas_empty_area() - Get the lowest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ */
+int mas_empty_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ unsigned char offset;
+ unsigned long *pivots;
+ enum maple_type mt;
+ struct maple_node *node;
+
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
+ return -EINVAL;
+
+ if (mas_is_start(mas))
+ mas_start(mas);
+ else if (mas->offset >= 2)
+ mas->offset -= 2;
+ else if (!mas_skip_node(mas))
+ return -EBUSY;
+
+ /* Empty set */
+ if (mas_is_none(mas) || mas_is_ptr(mas))
+ return mas_sparse_area(mas, min, max, size, true);
+
+ /* The start of the window can only be within these values */
+ mas->index = min;
+ mas->last = max;
+ mas_awalk(mas, size);
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ offset = mas->offset;
+ if (unlikely(offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ min = mas_safe_min(mas, pivots, offset);
+ if (mas->index < min)
+ mas->index = min;
+ mas->last = mas->index + size - 1;
+ mas->end = ma_data_end(node, mt, pivots, mas->max);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_empty_area);
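+
+/*
+ * Illustrative usage sketch (hypothetical names, tree's internal lock used):
+ * find the lowest free gap of 16 indices and claim it.  Typically used on a
+ * tree created with MT_FLAGS_ALLOC_RANGE so gaps are tracked.
+ *
+ *   MA_STATE(mas, &tree, 0, 0);
+ *
+ *   mas_lock(&mas);
+ *   if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
+ *       mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ *   mas_unlock(&mas);
+ *
+ * On success, mas.index and mas.last describe the gap that was found.
+ */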
+
+/*
+ * mas_empty_area_rev() - Get the highest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ struct maple_enode *last = mas->node;
+
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
+ return -EINVAL;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ mas->offset = mas_data_end(mas);
+ } else if (mas->offset >= 2) {
+ mas->offset -= 2;
+ } else if (!mas_rewind_node(mas)) {
+ return -EBUSY;
+ }
+
+ /* Empty set. */
+ if (mas_is_none(mas) || mas_is_ptr(mas))
+ return mas_sparse_area(mas, min, max, size, false);
+
+ /* The start of the window can only be within these values. */
+ mas->index = min;
+ mas->last = max;
+
+ while (!mas_rev_awalk(mas, size, &min, &max)) {
+ if (last == mas->node) {
+ if (!mas_rewind_node(mas))
+ return -EBUSY;
+ } else {
+ last = mas->node;
+ }
+ }
+
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ /* Trim the upper limit to the max. */
+ if (max < mas->last)
+ mas->last = max;
+
+ mas->index = mas->last - size + 1;
+ mas->end = mas_data_end(mas);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_empty_area_rev);
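+
+/*
+ * Illustrative usage sketch (hypothetical names, caller holds the tree lock):
+ * top-down allocation, the mirror of the mas_empty_area() example above.
+ *
+ *   MA_STATE(mas, &tree, 0, 0);
+ *
+ *   if (!mas_empty_area_rev(&mas, min, max, size))
+ *       pr_debug("highest fit at %lu-%lu\n", mas.index, mas.last);
+ */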
+
+/*
+ * mte_dead_leaves() - Mark all leaves of a node as dead.
+ * @enode: The maple encoded node
+ * @mt: The maple tree
+ * @slots: Pointer to the slot array
+ *
+ * Must hold the write lock.
+ *
+ * Return: The number of leaves marked as dead.
+ */
+static inline
+unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
+ void __rcu **slots)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void *entry;
+ int offset;
+
+ for (offset = 0; offset < mt_slot_count(enode); offset++) {
+ entry = mt_slot(mt, slots, offset);
+ type = mte_node_type(entry);
+ node = mte_to_node(entry);
+ /* Use both node and type to catch LE & BE metadata */
+ if (!node || !type)
+ break;
+
+ mte_set_node_dead(entry);
+ node->type = type;
+ rcu_assign_pointer(slots[offset], node);
+ }
+
+ return offset;
+}
+
+/**
+ * mte_dead_walk() - Walk down a dead tree to just before the leaves
+ * @enode: The maple encoded node
+ * @offset: The starting offset
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
+static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
+{
+ struct maple_node *node, *next;
+ void __rcu **slots = NULL;
+
+ next = mte_to_node(*enode);
+ do {
+ *enode = ma_enode_ptr(next);
+ node = mte_to_node(*enode);
+ slots = ma_slots(node, node->type);
+ next = rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map));
+ offset = 0;
+ } while (!ma_is_leaf(next->type));
+
+ return slots;
+}
+
+/**
+ * mt_free_walk() - Walk & free a tree in the RCU callback context
+ * @head: The RCU head that's within the node.
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
+static void mt_free_walk(struct rcu_head *head)
+{
+ void __rcu **slots;
+ struct maple_node *node, *start;
+ struct maple_enode *enode;
+ unsigned char offset;
+ enum maple_type type;
+
+ node = container_of(head, struct maple_node, rcu);
+
+ if (ma_is_leaf(node->type))
+ goto free_leaf;
+
+ start = node;
+ enode = mt_mk_node(node, node->type);
+ slots = mte_dead_walk(&enode, 0);
+ node = mte_to_node(enode);
+ do {
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
+
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
+ if ((offset < mt_slots[type]) &&
+ rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map)))
+ slots = mte_dead_walk(&enode, offset);
+ node = mte_to_node(enode);
+ } while ((node != start) || (node->slot_len < offset));
+
+ slots = ma_slots(node, node->type);
+ mt_free_bulk(node->slot_len, slots);
+
+free_leaf:
+ mt_free_rcu(&node->rcu);
+}
+
+static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
+ struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
+{
+ struct maple_node *node;
+ struct maple_enode *next = *enode;
+ void __rcu **slots = NULL;
+ enum maple_type type;
+ unsigned char next_offset = 0;
+
+ do {
+ *enode = next;
+ node = mte_to_node(*enode);
+ type = mte_node_type(*enode);
+ slots = ma_slots(node, type);
+ next = mt_slot_locked(mt, slots, next_offset);
+ if ((mte_dead_node(next)))
+ next = mt_slot_locked(mt, slots, ++next_offset);
+
+ mte_set_node_dead(*enode);
+ node->type = type;
+ node->piv_parent = prev;
+ node->parent_slot = offset;
+ offset = next_offset;
+ next_offset = 0;
+ prev = *enode;
+ } while (!mte_is_leaf(next));
+
+ return slots;
+}
+
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ bool free)
+{
+ void __rcu **slots;
+ struct maple_node *node = mte_to_node(enode);
+ struct maple_enode *start;
+
+ if (mte_is_leaf(enode)) {
+ node->type = mte_node_type(enode);
+ goto free_leaf;
+ }
+
+ start = enode;
+ slots = mte_destroy_descend(&enode, mt, start, 0);
+ node = mte_to_node(enode); /* Updated in the above call. */
+ do {
+ enum maple_type type;
+ unsigned char offset;
+ struct maple_enode *parent, *tmp;
+
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
+
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
+ if (offset >= mt_slots[type])
+ goto next;
+
+ tmp = mt_slot_locked(mt, slots, offset);
+ if (mte_node_type(tmp) && mte_to_node(tmp)) {
+ parent = enode;
+ enode = tmp;
+ slots = mte_destroy_descend(&enode, mt, parent, offset);
+ }
+next:
+ node = mte_to_node(enode);
+ } while (start != enode);
+
+ node = mte_to_node(enode);
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+
+free_leaf:
+ if (free)
+ mt_free_rcu(&node->rcu);
+ else
+ mt_clear_meta(mt, node, node->type);
+}
+
+/*
+ * mte_destroy_walk() - Free a tree or sub-tree.
+ * @enode: the encoded maple node (maple_enode) to start
+ * @mt: the tree to free - needed for node types.
+ *
+ * Must hold the write lock.
+ */
+static inline void mte_destroy_walk(struct maple_enode *enode,
+ struct maple_tree *mt)
+{
+ struct maple_node *node = mte_to_node(enode);
+
+ if (mt_in_rcu(mt)) {
+ mt_destroy_walk(enode, mt, false);
+ call_rcu(&node->rcu, mt_free_walk);
+ } else {
+ mt_destroy_walk(enode, mt, true);
+ }
+}
+
+static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+{
+ if (!mas_is_active(wr_mas->mas)) {
+ if (mas_is_start(wr_mas->mas))
+ return;
+
+ if (unlikely(mas_is_paused(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_none(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(wr_mas->mas)))
+ goto reset;
+ }
+
+ /*
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+ * mas_prealloc() from being reset.
+ */
+ if (wr_mas->mas->last > wr_mas->mas->max)
+ goto reset;
+
+ if (wr_mas->entry)
+ return;
+
+ if (mte_is_leaf(wr_mas->mas->node) &&
+ wr_mas->mas->last == wr_mas->mas->max)
+ goto reset;
+
+ return;
+
+reset:
+ mas_reset(wr_mas->mas);
+}
+
+/* Interface */
+
+/**
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
+ *
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated entries to ensure there is memory to
+ * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
+ *
+ * Return: the first entry between mas->index and mas->last or %NULL.
+ */
+void *mas_store(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ trace_ma_write(__func__, mas, 0, entry);
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if (MAS_WARN_ON(mas, mas->index > mas->last))
+ pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
+
+ if (mas->index > mas->last) {
+ mas_set_err(mas, -EINVAL);
+ return NULL;
+ }
+
+#endif
+
+ /*
+ * Storing is the same operation as insert with the added caveat that it
+ * can overwrite entries. Although this seems simple enough, one may
+ * want to examine what happens if a single store operation was to
+ * overwrite multiple entries within a self-balancing B-Tree.
+ */
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ return wr_mas.content;
+}
+EXPORT_SYMBOL_GPL(mas_store);
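+
+/*
+ * Illustrative usage sketch (hypothetical names): overwrite the range 10-20
+ * with a value entry while the tree lock is held.  mas_store() assumes the
+ * needed nodes were already allocated (see mas_preallocate() and
+ * mas_expected_entries()); use mas_store_gfp() when allocation may be needed.
+ *
+ *   MA_STATE(mas, &tree, 10, 20);
+ *   void *old;
+ *
+ *   mas_lock(&mas);
+ *   old = mas_store(&mas, xa_mk_value(42));
+ *   mas_unlock(&mas);
+ */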
+
+/**
+ * mas_store_gfp() - Store a value into the tree.
+ * @mas: The maple state
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations if necessary.
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (unlikely(mas_nomem(mas, gfp)))
+ goto retry;
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_store_gfp);
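+
+/*
+ * Illustrative usage sketch (hypothetical names): store with allocation
+ * allowed.  The internal lock may be dropped and retaken by mas_nomem() while
+ * reclaim runs, which is why the spinlock plus GFP_KERNEL combination works.
+ *
+ *   MA_STATE(mas, &tree, first, last);
+ *   int ret;
+ *
+ *   mtree_lock(&tree);
+ *   ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+ *   mtree_unlock(&tree);
+ */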
+
+/**
+ * mas_store_prealloc() - Store a value into the tree using memory
+ * preallocated in the maple state.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ */
+void mas_store_prealloc(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+ mas_wr_store_entry(&wr_mas);
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+ mas_destroy(mas);
+}
+EXPORT_SYMBOL_GPL(mas_store_prealloc);
+
+/**
+ * mas_preallocate() - Preallocate enough nodes for a store operation
+ * @mas: The maple state
+ * @entry: The entry that will be stored
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+ unsigned char node_size;
+ int request = 1;
+ int ret;
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ goto ask_now;
+
+ mas_wr_store_setup(&wr_mas);
+ wr_mas.content = mas_start(mas);
+ /* Root expand */
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+ goto ask_now;
+
+ if (unlikely(!mas_wr_walk(&wr_mas))) {
+ /* Spanning store, use worst case for now */
+ request = 1 + mas_mt_height(mas) * 3;
+ goto ask_now;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ /* Exact fit, no nodes needed. */
+ if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
+ return 0;
+
+ mas_wr_end_piv(&wr_mas);
+ node_size = mas_wr_new_end(&wr_mas);
+
+ /* Slot store, does not require additional nodes */
+ if (node_size == mas->end) {
+ /* reuse node */
+ if (!mt_in_rcu(mas->tree))
+ return 0;
+ /* shifting boundary */
+ if (wr_mas.offset_end - mas->offset == 1)
+ return 0;
+ }
+
+ if (node_size >= mt_slots[wr_mas.type]) {
+ /* Split, worst case for now. */
+ request = 1 + mas_mt_height(mas) * 2;
+ goto ask_now;
+ }
+
+ /* New root needs a single node */
+ if (unlikely(mte_is_root(mas->node)))
+ goto ask_now;
+
+ /* Potential spanning rebalance collapsing a node, use worst-case */
+ if (node_size - 1 <= mt_min_slots[wr_mas.type])
+ request = mas_mt_height(mas) * 2 - 1;
+
+ /* node store, slot store needs one node */
+ask_now:
+ mas_node_count_gfp(mas, request, gfp);
+ mas->mas_flags |= MA_STATE_PREALLOC;
+ if (likely(!mas_is_err(mas)))
+ return 0;
+
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_reset(mas);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mas_preallocate);
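+
+/*
+ * Illustrative usage sketch (hypothetical names): pre-allocate before the
+ * store, then commit with mas_store_prealloc().  The caller is assumed to
+ * hold whatever lock keeps the tree stable across both calls.
+ *
+ *   MA_STATE(mas, &tree, first, last);
+ *   int ret;
+ *
+ *   ret = mas_preallocate(&mas, entry, GFP_KERNEL);
+ *   if (ret)
+ *       return ret;
+ *   mas_store_prealloc(&mas, entry);
+ *
+ * mas_store_prealloc() calls mas_destroy() internally, so any leftover nodes
+ * are returned even if fewer were needed than were requested.
+ */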
+
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Upon completion, checks the left-most node and rebalances against the node
+ * to the right if necessary. Frees any allocated nodes associated with this
+ * maple state.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+ struct maple_alloc *node;
+ unsigned long total;
+
+ /*
+ * When using mas_for_each() to insert an expected number of elements,
+ * it is possible that the number inserted is less than the expected
+ * number. To fix an invalid final node, a check is performed here to
+ * rebalance the previous node with the final node.
+ */
+ if (mas->mas_flags & MA_STATE_REBALANCE) {
+ unsigned char end;
+
+ mas_start(mas);
+ mtree_range_walk(mas);
+ end = mas->end + 1;
+ if (end < mt_min_slot_count(mas->node) - 1)
+ mas_destroy_rebalance(mas, end);
+
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ }
+ mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+
+ total = mas_allocated(mas);
+ while (total) {
+ node = mas->alloc;
+ mas->alloc = node->slot[0];
+ if (node->node_count > 1) {
+ size_t count = node->node_count - 1;
+
+ mt_free_bulk(count, (void __rcu **)&node->slot[1]);
+ total -= count;
+ }
+ mt_free_one(ma_mnode_ptr(node));
+ total--;
+ }
+
+ mas->alloc = NULL;
+}
+EXPORT_SYMBOL_GPL(mas_destroy);
+
+/*
+ * mas_expected_entries() - Set the expected number of entries that will be inserted.
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries. The allocations will occur using the bulk allocator interface
+ * for speed. Please call mas_destroy() on the @mas after inserting the entries
+ * to ensure any unused nodes are freed.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
+{
+ int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+ struct maple_enode *enode = mas->node;
+ int nr_nodes;
+ int ret;
+
+ /*
+ * Sometimes it is necessary to duplicate a tree to a new tree, such as
+ * forking a process and duplicating the VMAs from one tree to a new
+ * tree. When such a situation arises, it is known that the new tree is
+ * not going to be used until the entire tree is populated. For
+ * performance reasons, it is best to use a bulk load with RCU disabled.
+ * This allows for optimistic splitting that favours the left and reuse
+ * of nodes during the operation.
+ */
+
+ /* Optimize splitting for bulk insert in-order */
+ mas->mas_flags |= MA_STATE_BULK;
+
+ /*
+ * Avoid overflow, assume a gap between each entry and a trailing null.
+ * If this is wrong, it just means allocation can happen during
+ * insertion of entries.
+ */
+ nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+ if (!mt_is_alloc(mas->tree))
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+ /* Leaves; reduce slots to keep space for expansion */
+ nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
+ /* Internal nodes */
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ /* Add working room for split (2 nodes) + new parents */
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+
+ /* Detect if allocations run out */
+ mas->mas_flags |= MA_STATE_PREALLOC;
+
+ if (!mas_is_err(mas))
+ return 0;
+
+ ret = xa_err(mas->node);
+ mas->node = enode;
+ mas_destroy(mas);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mas_expected_entries);
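+
+/*
+ * Illustrative usage sketch (hypothetical names): bulk-load nr ranges from a
+ * source array into a fresh tree, then release any unused allocations.
+ *
+ *   MA_STATE(mas, &new_tree, 0, 0);
+ *   unsigned long i;
+ *
+ *   mas_lock(&mas);
+ *   if (!mas_expected_entries(&mas, nr)) {
+ *       for (i = 0; i < nr; i++) {
+ *           mas_set_range(&mas, src[i].start, src[i].last);
+ *           mas_store(&mas, src[i].ptr);
+ *       }
+ *   }
+ *   mas_destroy(&mas);
+ *   mas_unlock(&mas);
+ */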
+
+static bool mas_next_setup(struct ma_state *mas, unsigned long max,
+ void **entry)
+{
+ bool was_none = mas_is_none(mas);
+
+ if (unlikely(mas->last >= max)) {
+ mas->status = ma_overflow;
+ return true;
+ }
+
+ switch (mas->status) {
+ case ma_active:
+ return false;
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ fallthrough;
+ case ma_start:
+ mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ break;
+ case ma_overflow:
+ /* Overflowed before, but the max changed */
+ mas->status = ma_active;
+ break;
+ case ma_underflow:
+ /* The user expects the mas to be one before where it is */
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
+ }
+
+ if (likely(mas_is_active(mas))) /* Fast path */
+ return false;
+
+ if (mas_is_ptr(mas)) {
+ *entry = NULL;
+ if (was_none && mas->index == 0) {
+ mas->index = mas->last = 0;
+ return true;
+ }
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ mas->status = ma_none;
+ return true;
+ }
+
+ if (mas_is_none(mas))
+ return true;
+
+ return false;
+}
+
+/**
+ * mas_next() - Get the next entry.
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Returns the next entry after @mas->index.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, false);
+}
+EXPORT_SYMBOL_GPL(mas_next);
+
+/**
+ * mas_next_range() - Advance the maple state to the next range
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true);
+}
+EXPORT_SYMBOL_GPL(mas_next_range);
+
+/**
+ * mt_next() - get the next value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @max: The maximum index to check
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry higher than @index or %NULL if nothing is found.
+ */
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_next(&mas, max);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_next);
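+
+/*
+ * Illustrative usage sketch (hypothetical names): peek at the entry stored
+ * after index without setting up a maple state.  The returned pointer is only
+ * RCU-safe while the caller holds rcu_read_lock(); for full iteration,
+ * mas_find()/mas_for_each() also keep the index.
+ *
+ *   void *entry = mt_next(&tree, index, ULONG_MAX);
+ */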
+
+static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
+{
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
+ return true;
+ }
+
+ switch (mas->status) {
+ case ma_active:
+ return false;
+ case ma_start:
+ break;
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* underflowed before but the min changed */
+ mas->status = ma_active;
+ break;
+ case ma_overflow:
+ /* User expects mas to be one after where it is */
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
+ }
+
+ if (mas_is_start(mas))
+ mas_walk(mas);
+
+ if (unlikely(mas_is_ptr(mas))) {
+ if (!mas->index) {
+ mas->status = ma_none;
+ return true;
+ }
+ mas->index = mas->last = 0;
+ *entry = mas_root(mas);
+ return true;
+ }
+
+ if (mas_is_none(mas)) {
+ if (mas->index) {
+ /* Walked to out-of-range pointer? */
+ mas->index = mas->last = 0;
+ mas->status = ma_root;
+ *entry = mas_root(mas);
+ return true;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * mas_prev() - Get the previous entry
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
+ * searchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
+
+ return mas_prev_slot(mas, min, false);
+}
+EXPORT_SYMBOL_GPL(mas_prev);
+
+/**
+ * mas_prev_range() - Advance to the previous range
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
+ * searchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev_range(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
+
+ return mas_prev_slot(mas, min, true);
+}
+EXPORT_SYMBOL_GPL(mas_prev_range);
+
+/**
+ * mt_prev() - get the previous value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @min: The minimum index to check
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry before @index or %NULL if nothing is found.
+ */
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_prev(&mas, min);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_prev);
+
+/**
+ * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
+ * @mas: The maple state to pause
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @mas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call mas_pause(), the mt_for_each()
+ * iterator may be more appropriate.
+ *
+ */
+void mas_pause(struct ma_state *mas)
+{
+ mas->status = ma_pause;
+ mas->node = NULL;
+}
+EXPORT_SYMBOL_GPL(mas_pause);
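+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): dropping
+ * the lock in the middle of a mas_for_each() walk. The tree, the limit and
+ * the rescheduling policy are hypothetical.
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *	void *entry;
+ *
+ *	mas_lock(&mas);
+ *	mas_for_each(&mas, entry, ULONG_MAX) {
+ *		if (need_resched()) {
+ *			mas_pause(&mas);
+ *			mas_unlock(&mas);
+ *			cond_resched();
+ *			mas_lock(&mas);
+ *		}
+ *	}
+ *	mas_unlock(&mas);
+ */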
+
+/**
+ * mas_find_setup() - Internal function to set up mas_find*().
+ * @mas: The maple state
+ * @max: The maximum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
+{
+ switch (mas->status) {
+ case ma_active:
+ if (mas->last < max)
+ return false;
+ return true;
+ case ma_start:
+ break;
+ case ma_pause:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->index = ++mas->last;
+ mas->status = ma_start;
+ break;
+ case ma_none:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->index = mas->last;
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+		/* mas is pointing at the entry before being unable to go lower */
+ if (unlikely(mas->index >= max)) {
+ mas->status = ma_overflow;
+ return true;
+ }
+
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_overflow:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
+ }
+
+ if (mas_is_start(mas)) {
+ /* First run or continue */
+ if (mas->index > max)
+ return true;
+
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+
+ }
+
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+
+ if (unlikely(mas_is_none(mas)))
+ return true;
+
+ if (mas->index == max)
+ return true;
+
+ return false;
+
+ptr_out_of_range:
+ mas->status = ma_none;
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ return true;
+}
+
+/**
+ * mas_find() - On the first call, find the entry at or after mas->index up to
+ * %max. Otherwise, find the entry after mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->status to ma_overflow.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ entry = mas_next_slot(mas, max, false);
+ /* Ignore overflow */
+ mas->status = ma_active;
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_find);
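+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): mas_find()
+ * is the primitive behind mas_for_each(). A plain forward scan, with the
+ * tree and handle_entry() hypothetical:
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *	void *entry;
+ *
+ *	rcu_read_lock();
+ *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
+ *		handle_entry(mas.index, mas.last, entry);
+ *	rcu_read_unlock();
+ */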
+
+/**
+ * mas_find_range() - On the first call, find the entry at or after
+ * mas->index up to %max. Otherwise, advance to the next slot after
+ * mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->status to ma_overflow.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true);
+}
+EXPORT_SYMBOL_GPL(mas_find_range);
+
+/**
+ * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
+ * @mas: The maple state
+ * @min: The minimum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+ void **entry)
+{
+
+ switch (mas->status) {
+ case ma_active:
+ goto active;
+ case ma_start:
+ break;
+ case ma_pause:
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
+ return true;
+ }
+ mas->last = --mas->index;
+ mas->status = ma_start;
+ break;
+ case ma_none:
+ if (mas->index <= min)
+ goto none;
+
+ mas->last = mas->index;
+ mas->status = ma_start;
+ break;
+ case ma_overflow: /* user expects the mas to be one after where it is */
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
+ return true;
+ }
+
+ mas->status = ma_active;
+ break;
+ case ma_underflow: /* user expects the mas to be one before where it is */
+ if (unlikely(mas->index <= min))
+ return true;
+
+ mas->status = ma_active;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
+ }
+
+ if (mas_is_start(mas)) {
+ /* First run or continue */
+ if (mas->index < min)
+ return true;
+
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
+
+ if (unlikely(mas_is_ptr(mas)))
+ goto none;
+
+ if (unlikely(mas_is_none(mas))) {
+ /*
+ * Walked to the location, and there was nothing so the previous
+ * location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->status = ma_root;
+ *entry = mas_root(mas);
+ return true;
+ }
+
+active:
+ if (mas->index < min)
+ return true;
+
+ return false;
+
+none:
+ mas->status = ma_none;
+ return true;
+}
+
+/**
+ * mas_find_rev() - On the first call, find the first non-null entry at or below
+ * mas->index down to %min. Otherwise find the first non-null entry below
+ * mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->status to ma_underflow.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_rev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, false);
+
+}
+EXPORT_SYMBOL_GPL(mas_find_rev);
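+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): scanning
+ * all entries from the highest index downwards. The tree and process_entry()
+ * are hypothetical.
+ *
+ *	MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
+ *	void *entry;
+ *
+ *	rcu_read_lock();
+ *	while ((entry = mas_find_rev(&mas, 0)) != NULL)
+ *		process_entry(entry);
+ *	rcu_read_unlock();
+ */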
+
+/**
+ * mas_find_range_rev() - On the first call, find the first non-null entry
+ * at or below mas->index down to %min. Otherwise, advance to the previous
+ * slot below mas->index, down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->status to ma_underflow.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, true);
+}
+EXPORT_SYMBOL_GPL(mas_find_range_rev);
+
+/**
+ * mas_erase() - Find the range in which index resides and erase the entire
+ * range.
+ * @mas: The maple state
+ *
+ * Must hold the write lock.
+ * Searches for @mas->index, sets @mas->index and @mas->last to the range and
+ * erases that range.
+ *
+ * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
+ */
+void *mas_erase(struct ma_state *mas)
+{
+ void *entry;
+ MA_WR_STATE(wr_mas, mas, NULL);
+
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->status = ma_start;
+
+ /* Retry unnecessary when holding the write lock. */
+ entry = mas_state_walk(mas);
+ if (!entry)
+ return NULL;
+
+write_retry:
+ /* Must reset to ensure spanning writes of last slot are detected */
+ mas_reset(mas);
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(mas, GFP_KERNEL))
+ goto write_retry;
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_erase);
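+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): erasing
+ * whatever range covers index 5 while holding the tree lock. The tree is
+ * hypothetical.
+ *
+ *	MA_STATE(mas, &tree, 5, 5);
+ *	void *old;
+ *
+ *	mas_lock(&mas);
+ *	old = mas_erase(&mas);
+ *	mas_unlock(&mas);
+ */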
+
+/**
+ * mas_nomem() - Check if there was an error allocating and do the allocation
+ * if necessary. If there are allocations, then free them.
+ * @mas: The maple state
+ * @gfp: The GFP_FLAGS to use for allocations
+ * Return: true on allocation, false otherwise.
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+ __must_hold(mas->tree->ma_lock)
+{
+ if (likely(mas->node != MA_ERROR(-ENOMEM))) {
+ mas_destroy(mas);
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
+ mtree_unlock(mas->tree);
+ mas_alloc_nodes(mas, gfp);
+ mtree_lock(mas->tree);
+ } else {
+ mas_alloc_nodes(mas, gfp);
+ }
+
+ if (!mas_allocated(mas))
+ return false;
+
+ mas->status = ma_start;
+ return true;
+}
+
+void __init maple_tree_init(void)
+{
+ maple_node_cache = kmem_cache_create("maple_node",
+ sizeof(struct maple_node), sizeof(struct maple_node),
+ SLAB_PANIC, NULL);
+}
+
+/**
+ * mtree_load() - Load a value stored in a maple tree
+ * @mt: The maple tree
+ * @index: The index to load
+ *
+ * Return: the entry or %NULL
+ */
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
+ MA_STATE(mas, mt, index, index);
+ void *entry;
+
+ trace_ma_read(__func__, &mas);
+ rcu_read_lock();
+retry:
+ entry = mas_start(&mas);
+ if (unlikely(mas_is_none(&mas)))
+ goto unlock;
+
+ if (unlikely(mas_is_ptr(&mas))) {
+ if (index)
+ entry = NULL;
+
+ goto unlock;
+ }
+
+ entry = mtree_lookup_walk(&mas);
+ if (!entry && unlikely(mas_is_start(&mas)))
+ goto retry;
+unlock:
+ rcu_read_unlock();
+ if (xa_is_zero(entry))
+ return NULL;
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_load);
+
+/**
+ * mtree_store_range() - Store an entry at a given range.
+ * @mt: The maple tree
+ * @index: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store_range(struct maple_tree *mt, unsigned long index,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(mas, mt, index, last);
+ MA_WR_STATE(wr_mas, &mas, entry);
+
+ trace_ma_write(__func__, &mas, 0, entry);
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (index > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&mas))
+ return xa_err(mas.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_store_range);
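+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): associate
+ * one entry with the whole index range [10, 19]. The tree and my_object are
+ * hypothetical.
+ *
+ *	DEFINE_MTREE(tree);
+ *	int ret;
+ *
+ *	ret = mtree_store_range(&tree, 10, 19, my_object, GFP_KERNEL);
+ *	if (ret)
+ *		return ret;
+ */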
+
+/**
+ * mtree_store() - Store an entry at a given index.
+ * @mt: The maple tree
+ * @index: The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_store_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_store);
+
+/**
+ * mtree_insert_range() - Insert an entry at a given range if there is no value.
+ * @mt: The maple tree
+ * @first: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(ms, mt, first, last);
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (first > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_insert(&ms, entry);
+ if (mas_nomem(&ms, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&ms))
+ return xa_err(ms.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_insert_range);
+
+/**
+ * mtree_insert() - Insert an entry at a given index if there is no value.
+ * @mt: The maple tree
+ * @index: The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_insert_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_insert);
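+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): unlike
+ * mtree_store(), a second insert at an occupied index fails instead of
+ * overwriting. The tree and the stored pointers are hypothetical.
+ *
+ *	int ret;
+ *
+ *	ret = mtree_insert(&tree, 42, first, GFP_KERNEL);
+ *		(returns 0: index 42 was empty)
+ *	ret = mtree_insert(&tree, 42, second, GFP_KERNEL);
+ *		(returns -EEXIST: index 42 is now occupied)
+ */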
+
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ ret = mas_empty_area(&mas, min, max, size);
+ if (ret)
+ goto unlock;
+
+ mas_insert(&mas, entry);
+ /*
+ * mas_nomem() may release the lock, causing the allocated area
+ * to be unavailable, so try to allocate a free area again.
+ */
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ if (mas_is_err(&mas))
+ ret = xa_err(mas.node);
+ else
+ *startp = mas.index;
+
+unlock:
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_range);
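+
+/*
+ * Usage example (an illustrative sketch, not part of this patch; only valid
+ * for trees created with MT_FLAGS_ALLOC_RANGE): find and claim a free range
+ * of 16 consecutive indices between 0 and 1023. The tree and my_object are
+ * hypothetical.
+ *
+ *	unsigned long start;
+ *	int ret;
+ *
+ *	ret = mtree_alloc_range(&tree, &start, my_object, 16, 0, 1023,
+ *				GFP_KERNEL);
+ *	if (!ret)
+ *		pr_debug("allocated [%lu, %lu]\n", start, start + 15);
+ */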
+
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ ret = mas_empty_area_rev(&mas, min, max, size);
+ if (ret)
+ goto unlock;
+
+ mas_insert(&mas, entry);
+ /*
+ * mas_nomem() may release the lock, causing the allocated area
+ * to be unavailable, so try to allocate a free area again.
+ */
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ if (mas_is_err(&mas))
+ ret = xa_err(mas.node);
+ else
+ *startp = mas.index;
+
+unlock:
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_rrange);
+
+/**
+ * mtree_erase() - Find an index and erase the entire range.
+ * @mt: The maple tree
+ * @index: The index to erase
+ *
+ * Erasing is the same as a walk to an entry then a store of NULL to that
+ * entire range. In fact, it is implemented as such using the advanced API.
+ *
+ * Return: The entry stored at the @index or %NULL
+ */
+void *mtree_erase(struct maple_tree *mt, unsigned long index)
+{
+ void *entry = NULL;
+
+ MA_STATE(mas, mt, index, index);
+ trace_ma_op(__func__, &mas);
+
+ mtree_lock(mt);
+ entry = mas_erase(&mas);
+ mtree_unlock(mt);
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_erase);
+
+/*
+ * mas_dup_free() - Free an incomplete duplication of a tree.
+ * @mas: The maple state of an incomplete tree.
+ *
+ * The parameter @mas->node passed in indicates that the allocation failed on
+ * this node. This function frees all nodes starting from @mas->node in the
+ * reverse order of mas_dup_build(). There is no need to hold the source tree
+ * lock at this time.
+ */
+static void mas_dup_free(struct ma_state *mas)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char count, i;
+
+ /* Maybe the first node allocation failed. */
+ if (mas_is_none(mas))
+ return;
+
+ while (!mte_is_root(mas->node)) {
+ mas_ascend(mas);
+ if (mas->offset) {
+ mas->offset--;
+ do {
+ mas_descend(mas);
+ mas->offset = mas_data_end(mas);
+ } while (!mte_is_leaf(mas->node));
+
+ mas_ascend(mas);
+ }
+
+ node = mte_to_node(mas->node);
+ type = mte_node_type(mas->node);
+ slots = ma_slots(node, type);
+ count = mas_data_end(mas) + 1;
+ for (i = 0; i < count; i++)
+ ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
+ mt_free_bulk(count, slots);
+ }
+
+ node = mte_to_node(mas->node);
+ mt_free_one(node);
+}
+
+/*
+ * mas_copy_node() - Copy a maple node and replace the parent.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @parent: The parent of the new node.
+ *
+ * Copy @mas->node to @new_mas->node, set @parent to be the parent of
+ * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
+ struct maple_pnode *parent)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ unsigned long val;
+
+ /* Copy the node completely. */
+ memcpy(new_node, node, sizeof(struct maple_node));
+ /* Update the parent node pointer. */
+ val = (unsigned long)node->parent & MAPLE_NODE_MASK;
+ new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
+}
+
+/*
+ * mas_dup_alloc() - Allocate child nodes for a maple node.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function allocates child nodes for @new_mas->node during the duplication
+ * process. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ enum maple_type type;
+ unsigned char request, count, i;
+ void __rcu **slots;
+ void __rcu **new_slots;
+ unsigned long val;
+
+ /* Allocate memory for child nodes. */
+ type = mte_node_type(mas->node);
+ new_slots = ma_slots(new_node, type);
+ request = mas_data_end(mas) + 1;
+ count = mt_alloc_bulk(gfp, request, (void **)new_slots);
+ if (unlikely(count < request)) {
+ memset(new_slots, 0, request * sizeof(void *));
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ /* Restore node type information in slots. */
+ slots = ma_slots(node, type);
+ for (i = 0; i < count; i++) {
+ val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
+ val &= MAPLE_NODE_MASK;
+ ((unsigned long *)new_slots)[i] |= val;
+ }
+}
+
+/*
+ * mas_dup_build() - Build a new maple tree from a source tree
+ * @mas: The maple state of the source tree, must be in the ma_start state.
+ * @new_mas: The maple state of the new tree, must be in the ma_start state.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function builds a new tree in DFS preorder. If the memory allocation
+ * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
+ * last node. mas_dup_free() will free the incomplete duplication of a tree.
+ *
+ * Note that the attributes of the two trees need to be exactly the same, and the
+ * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
+ */
+static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node;
+ struct maple_pnode *parent = NULL;
+ struct maple_enode *root;
+ enum maple_type type;
+
+ if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
+ unlikely(!mtree_empty(new_mas->tree))) {
+ mas_set_err(mas, -EINVAL);
+ return;
+ }
+
+ root = mas_start(mas);
+ if (mas_is_ptr(mas) || mas_is_none(mas))
+ goto set_new_tree;
+
+ node = mt_alloc_one(gfp);
+ if (!node) {
+ new_mas->status = ma_none;
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ type = mte_node_type(mas->node);
+ root = mt_mk_node(node, type);
+ new_mas->node = root;
+ new_mas->min = 0;
+ new_mas->max = ULONG_MAX;
+ root = mte_mk_root(root);
+ while (1) {
+ mas_copy_node(mas, new_mas, parent);
+ if (!mte_is_leaf(mas->node)) {
+ /* Only allocate child nodes for non-leaf nodes. */
+ mas_dup_alloc(mas, new_mas, gfp);
+ if (unlikely(mas_is_err(mas)))
+ return;
+ } else {
+ /*
+ * This is the last leaf node and duplication is
+ * completed.
+ */
+ if (mas->max == ULONG_MAX)
+ goto done;
+
+ /* This is not the last leaf node and needs to go up. */
+ do {
+ mas_ascend(mas);
+ mas_ascend(new_mas);
+ } while (mas->offset == mas_data_end(mas));
+
+ /* Move to the next subtree. */
+ mas->offset++;
+ new_mas->offset++;
+ }
+
+ mas_descend(mas);
+ parent = ma_parent_ptr(mte_to_node(new_mas->node));
+ mas_descend(new_mas);
+ mas->offset = 0;
+ new_mas->offset = 0;
+ }
+done:
+ /* Specially handle the parent of the root node. */
+ mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
+set_new_tree:
+ /* Make them the same height */
+ new_mas->tree->ma_flags = mas->tree->ma_flags;
+ rcu_assign_pointer(new_mas->tree->ma_root, root);
+}
+
+/**
+ * __mt_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes in the source tree and allocate
+ * new child nodes in non-leaf nodes. The new node is exactly the same as the
+ * source node except for all the addresses stored in it. It will be faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ * Note that the user needs to manually lock the source tree and the new tree.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_dup_build(&mas, &new_mas, gfp);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__mt_dup);
+
+/**
+ * mtree_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It uses memcpy() to copy nodes in the source tree and allocate
+ * new child nodes in non-leaf nodes. The new node is exactly the same as the
+ * source node except for all the addresses stored in it. It will be faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_lock(&new_mas);
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+ mas_dup_build(&mas, &new_mas, gfp);
+ mas_unlock(&mas);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ mas_unlock(&new_mas);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_dup);
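+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): duplicate
+ * one tree into another that was initialised with the same flags. Both
+ * trees are hypothetical.
+ *
+ *	DEFINE_MTREE(old_tree);
+ *	DEFINE_MTREE(new_tree);
+ *	int ret;
+ *
+ *	... populate old_tree ...
+ *
+ *	ret = mtree_dup(&old_tree, &new_tree, GFP_KERNEL);
+ *	if (ret)
+ *		return ret;
+ */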
+
+/**
+ * __mt_destroy() - Walk and free all nodes of a locked maple tree.
+ * @mt: The maple tree
+ *
+ * Note: Does not handle locking.
+ */
+void __mt_destroy(struct maple_tree *mt)
+{
+ void *root = mt_root_locked(mt);
+
+ rcu_assign_pointer(mt->ma_root, NULL);
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mt);
+
+ mt->ma_flags = mt_attr(mt);
+}
+EXPORT_SYMBOL_GPL(__mt_destroy);
+
+/**
+ * mtree_destroy() - Destroy a maple tree
+ * @mt: The maple tree
+ *
+ * Frees all resources used by the tree. Handles locking.
+ */
+void mtree_destroy(struct maple_tree *mt)
+{
+ mtree_lock(mt);
+ __mt_destroy(mt);
+ mtree_unlock(mt);
+}
+EXPORT_SYMBOL(mtree_destroy);
+
+/**
+ * mt_find() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value of the search range
+ *
+ * Takes the RCU read lock internally to protect the search; the returned
+ * pointer is not protected once the RCU read lock is dropped.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * If an entry is found, @index is updated to point to the next possible
+ * entry, independent of whether the found entry occupies a single index
+ * or a range of indices.
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
+ MA_STATE(mas, mt, *index, *index);
+ void *entry;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ unsigned long copy = *index;
+#endif
+
+ trace_ma_read(__func__, &mas);
+
+ if ((*index) > max)
+ return NULL;
+
+ rcu_read_lock();
+retry:
+ entry = mas_state_walk(&mas);
+ if (mas_is_start(&mas))
+ goto retry;
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+
+ if (entry)
+ goto unlock;
+
+ while (mas_is_active(&mas) && (mas.last < max)) {
+ entry = mas_next_entry(&mas, max);
+ if (likely(entry && !xa_is_zero(entry)))
+ break;
+ }
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+unlock:
+ rcu_read_unlock();
+ if (likely(entry)) {
+ *index = mas.last + 1;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
+ pr_err("index not increased! %lx <= %lx\n",
+ *index, copy);
+#endif
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL(mt_find);
+
+/**
+ * mt_find_after() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value to check
+ *
+ * Same as mt_find() except that it checks @index for 0 before
+ * searching. If @index == 0, the search is aborted. This covers a wrap
+ * around of @index to 0 in an iterator loop.
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max)
+{
+ if (!(*index))
+ return NULL;
+
+ return mt_find(mt, index, max);
+}
+EXPORT_SYMBOL(mt_find_after);
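+
+/*
+ * Usage example (an illustrative sketch, not part of this patch): the
+ * mt_for_each() iterator is built from mt_find() and mt_find_after(). An
+ * equivalent open-coded loop, with the tree and handle_entry() hypothetical:
+ *
+ *	unsigned long index = 0;
+ *	void *entry;
+ *
+ *	entry = mt_find(&tree, &index, ULONG_MAX);
+ *	while (entry) {
+ *		handle_entry(entry);
+ *		entry = mt_find_after(&tree, &index, ULONG_MAX);
+ *	}
+ */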
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+atomic_t maple_tree_tests_run;
+EXPORT_SYMBOL_GPL(maple_tree_tests_run);
+atomic_t maple_tree_tests_passed;
+EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
+
+#ifndef __KERNEL__
+extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
+void mt_set_non_kernel(unsigned int val)
+{
+ kmem_cache_set_non_kernel(maple_node_cache, val);
+}
+
+extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
+unsigned long mt_get_alloc_size(void)
+{
+ return kmem_cache_get_alloc(maple_node_cache);
+}
+
+extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
+void mt_zero_nr_tallocated(void)
+{
+ kmem_cache_zero_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
+unsigned int mt_nr_tallocated(void)
+{
+ return kmem_cache_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
+unsigned int mt_nr_allocated(void)
+{
+ return kmem_cache_nr_allocated(maple_node_cache);
+}
+
+void mt_cache_shrink(void)
+{
+}
+#else
+/*
+ * mt_cache_shrink() - For testing, don't use this.
+ *
+ * Certain testcases can trigger an OOM when combined with other memory
+ * debugging configuration options. This function is used to reduce the
+ * possibility of an out-of-memory event due to kmem_cache objects remaining
+ * around for longer than usual.
+ */
+void mt_cache_shrink(void)
+{
+ kmem_cache_shrink(maple_node_cache);
+
+}
+EXPORT_SYMBOL_GPL(mt_cache_shrink);
+
+#endif /* not defined __KERNEL__ */
+/*
+ * mas_get_slot() - Get the entry in the maple state node stored at @offset.
+ * @mas: The maple state
+ * @offset: The offset into the slot array to fetch.
+ *
+ * Return: The entry stored at @offset.
+ */
+static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
+ unsigned char offset)
+{
+ return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
+ offset);
+}
+
+/* Depth first search, post-order */
+static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
+{
+
+ struct maple_enode *p, *mn = mas->node;
+ unsigned long p_min, p_max;
+
+ mas_next_node(mas, mas_mn(mas), max);
+ if (!mas_is_overflow(mas))
+ return;
+
+ if (mte_is_root(mn))
+ return;
+
+ mas->node = mn;
+ mas_ascend(mas);
+ do {
+ p = mas->node;
+ p_min = mas->min;
+ p_max = mas->max;
+ mas_prev_node(mas, 0);
+ } while (!mas_is_underflow(mas));
+
+ mas->node = p;
+ mas->max = p_max;
+ mas->min = p_min;
+}
+
+/* Tree validations */
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format);
+static void mt_dump_range(unsigned long min, unsigned long max,
+ unsigned int depth, enum mt_dump_format format)
+{
+ static const char spaces[] = " ";
+
+ switch(format) {
+ case mt_dump_hex:
+ if (min == max)
+ pr_info("%.*s%lx: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
+ break;
+ case mt_dump_dec:
+ if (min == max)
+ pr_info("%.*s%lu: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
+ }
+}
+
+static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
+ unsigned int depth, enum mt_dump_format format)
+{
+ mt_dump_range(min, max, depth, format);
+
+ if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else if (mt_is_reserved(entry))
+ pr_cont("UNKNOWN ENTRY (%p)\n", entry);
+ else
+ pr_cont("%p\n", entry);
+}
+
+static void mt_dump_range64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_range_64 *node = &mte_to_node(entry)->mr64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
+ switch(format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_RANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i] && max != mt_node_max(entry))
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ switch(format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%lx ", node->gap[i]);
+ break;
+ case mt_dump_dec:
+ pr_cont("%lu ", node->gap[i]);
+ }
+ }
+ pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_ARANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i])
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_node *node = mte_to_node(entry);
+ unsigned int type = mte_node_type(entry);
+ unsigned int i;
+
+ mt_dump_range(min, max, depth, format);
+
+ pr_cont("node %p depth %d type %d parent %p", node, depth, type,
+ node ? node->parent : NULL);
+ switch (type) {
+ case maple_dense:
+ pr_cont("\n");
+ for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
+ if (min + i > max)
+ pr_cont("OUT OF RANGE: ");
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ min + i, min + i, depth, format);
+ }
+ break;
+ case maple_leaf_64:
+ case maple_range_64:
+ mt_dump_range64(mt, entry, min, max, depth, format);
+ break;
+ case maple_arange_64:
+ mt_dump_arange64(mt, entry, min, max, depth, format);
+ break;
+
+ default:
+ pr_cont(" UNKNOWN TYPE\n");
+ }
+}
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
+{
+ void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
+
+ pr_info("maple_tree(%p) flags %X, height %u root %p\n",
+ mt, mt->ma_flags, mt_height(mt), entry);
+ if (!xa_is_node(entry))
+ mt_dump_entry(entry, 0, 0, 0, format);
+ else if (entry)
+ mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
+}
+EXPORT_SYMBOL_GPL(mt_dump);
+
+/*
+ * Calculate the maximum gap in a node and check if that's what is reported in
+ * the parent (unless root).
+ */
+static void mas_validate_gaps(struct ma_state *mas)
+{
+ struct maple_enode *mte = mas->node;
+ struct maple_node *p_mn, *node = mte_to_node(mte);
+ enum maple_type mt = mte_node_type(mas->node);
+ unsigned long gap = 0, max_gap = 0;
+ unsigned long p_end, p_start = mas->min;
+ unsigned char p_slot, offset;
+ unsigned long *gaps = NULL;
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned int i;
+
+ if (ma_is_dense(mt)) {
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ if (mas_get_slot(mas, i)) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ continue;
+ }
+ gap++;
+ }
+ goto counted;
+ }
+
+ gaps = ma_gaps(node, mt);
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ p_end = mas_safe_pivot(mas, pivots, i, mt);
+
+ if (!gaps) {
+ if (!mas_get_slot(mas, i))
+ gap = p_end - p_start + 1;
+ } else {
+ void *entry = mas_get_slot(mas, i);
+
+ gap = gaps[i];
+ MT_BUG_ON(mas->tree, !entry);
+
+ if (gap > p_end - p_start + 1) {
+ pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+ mas_mn(mas), i, gap, p_end, p_start,
+ p_end - p_start + 1);
+ MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
+ }
+ }
+
+ if (gap > max_gap)
+ max_gap = gap;
+
+ p_start = p_end + 1;
+ if (p_end >= mas->max)
+ break;
+ }
+
+counted:
+ if (mt == maple_arange_64) {
+ MT_BUG_ON(mas->tree, !gaps);
+ offset = ma_meta_gap(node);
+ if (offset > i) {
+ pr_err("gap offset %p[%u] is invalid\n", node, offset);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (gaps[offset] != max_gap) {
+ pr_err("gap %p[%u] is not the largest gap %lu\n",
+ node, offset, max_gap);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ for (i++ ; i < mt_slot_count(mte); i++) {
+ if (gaps[i] != 0) {
+ pr_err("gap %p[%u] beyond node limit != 0\n",
+ node, i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+ }
+ }
+
+ if (mte_is_root(mte))
+ return;
+
+ p_slot = mte_parent_slot(mas->node);
+ p_mn = mte_parent(mte);
+ MT_BUG_ON(mas->tree, max_gap > mas->max);
+ if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
+ pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
+ mt_dump(mas->tree, mt_dump_hex);
+ MT_BUG_ON(mas->tree, 1);
+ }
+}
+
+static void mas_validate_parent_slot(struct ma_state *mas)
+{
+ struct maple_node *parent;
+ struct maple_enode *node;
+ enum maple_type p_type;
+ unsigned char p_slot;
+ void __rcu **slots;
+ int i;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ p_slot = mte_parent_slot(mas->node);
+ p_type = mas_parent_type(mas, mas->node);
+ parent = mte_parent(mas->node);
+ slots = ma_slots(parent, p_type);
+ MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
+
+ /* Check prev/next parent slot for duplicate node entry */
+
+ for (i = 0; i < mt_slots[p_type]; i++) {
+ node = mas_slot(mas, slots, i);
+ if (i == p_slot) {
+ if (node != mas->node)
+ pr_err("parent %p[%u] does not have %p\n",
+ parent, i, mas_mn(mas));
+ MT_BUG_ON(mas->tree, node != mas->node);
+ } else if (node == mas->node) {
+ pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
+ mas_mn(mas), parent, i, p_slot);
+ MT_BUG_ON(mas->tree, node == mas->node);
+ }
+ }
+}
+
+static void mas_validate_child_slot(struct ma_state *mas)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
+ struct maple_enode *child;
+ unsigned char i;
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ child = mas_slot(mas, slots, i);
+
+ if (!child) {
+ pr_err("Non-leaf node lacks child at %p[%u]\n",
+ mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (mte_parent_slot(child) != i) {
+ pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
+ mas_mn(mas), i, mte_to_node(child),
+ mte_parent_slot(child));
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (mte_parent(child) != mte_to_node(mas->node)) {
+ pr_err("child %p has parent %p not %p\n",
+ mte_to_node(child), mte_parent(child),
+ mte_to_node(mas->node));
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (i < mt_pivots[type] && pivots[i] == mas->max)
+ break;
+ }
+}
+
+/*
+ * Validate all pivots are within mas->min and mas->max, check metadata ends
+ * where the maximum ends and ensure there are no slots or pivots set outside of
+ * the end of the data.
+ */
+static void mas_validate_limits(struct ma_state *mas)
+{
+ int i;
+ unsigned long prev_piv = 0;
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ unsigned long piv;
+
+ piv = mas_safe_pivot(mas, pivots, i, type);
+
+ if (!piv && (i != 0)) {
+ pr_err("Missing node limit pivot at %p[%u]",
+ mas_mn(mas), i);
+ MAS_WARN_ON(mas, 1);
+ }
+
+ if (prev_piv > piv) {
+ pr_err("%p[%u] piv %lu < prev_piv %lu\n",
+ mas_mn(mas), i, piv, prev_piv);
+ MAS_WARN_ON(mas, piv < prev_piv);
+ }
+
+ if (piv < mas->min) {
+ pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ piv, mas->min);
+ MAS_WARN_ON(mas, piv < mas->min);
+ }
+ if (piv > mas->max) {
+ pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
+ piv, mas->max);
+ MAS_WARN_ON(mas, piv > mas->max);
+ }
+ prev_piv = piv;
+ if (piv == mas->max)
+ break;
+ }
+
+ if (mas_data_end(mas) != i) {
+ pr_err("node%p: data_end %u != the last slot offset %u\n",
+ mas_mn(mas), mas_data_end(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ for (i += 1; i < mt_slots[type]; i++) {
+ void *entry = mas_slot(mas, slots, i);
+
+ if (entry && (i != mt_slots[type] - 1)) {
+ pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
+ i, entry);
+ MT_BUG_ON(mas->tree, entry != NULL);
+ }
+
+ if (i < mt_pivots[type]) {
+ unsigned long piv = pivots[i];
+
+ if (!piv)
+ continue;
+
+ pr_err("%p[%u] should not have piv %lu\n",
+ mas_mn(mas), i, piv);
+ MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
+ }
+ }
+}
+
+static void mt_validate_nulls(struct maple_tree *mt)
+{
+ void *entry, *last = (void *)1;
+ unsigned char offset = 0;
+ void __rcu **slots;
+ MA_STATE(mas, mt, 0, 0);
+
+ mas_start(&mas);
+ if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
+ return;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
+ do {
+ entry = mas_slot(&mas, slots, offset);
+ if (!last && !entry) {
+ pr_err("Sequential nulls end at %p[%u]\n",
+ mas_mn(&mas), offset);
+ }
+ MT_BUG_ON(mt, !last && !entry);
+ last = entry;
+ if (offset == mas_data_end(&mas)) {
+ mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
+ if (mas_is_overflow(&mas))
+ return;
+ offset = 0;
+ slots = ma_slots(mte_to_node(mas.node),
+ mte_node_type(mas.node));
+ } else {
+ offset++;
+ }
+
+ } while (!mas_is_overflow(&mas));
+}
+
+/*
+ * Validate a maple tree by checking:
+ * 1. The limits (pivots are within mas->min to mas->max)
+ * 2. The gap is correctly set in the parents
+ */
+void mt_validate(struct maple_tree *mt)
+{
+ unsigned char end;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_start(&mas);
+ if (!mas_is_active(&mas))
+ goto done;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ while (!mas_is_overflow(&mas)) {
+ MAS_WARN_ON(&mas, mte_dead_node(mas.node));
+ end = mas_data_end(&mas);
+ if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
+ (mas.max != ULONG_MAX))) {
+ pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
+ }
+
+ mas_validate_parent_slot(&mas);
+ mas_validate_limits(&mas);
+ mas_validate_child_slot(&mas);
+ if (mt_is_alloc(mt))
+ mas_validate_gaps(&mas);
+ mas_dfs_postorder(&mas, ULONG_MAX);
+ }
+ mt_validate_nulls(mt);
+done:
+ rcu_read_unlock();
+
+}
+EXPORT_SYMBOL_GPL(mt_validate);
+
+void mas_dump(const struct ma_state *mas)
+{
+ pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
+ switch (mas->status) {
+ case ma_active:
+ pr_err("(ma_active)");
+ break;
+ case ma_none:
+ pr_err("(ma_none)");
+ break;
+ case ma_root:
+ pr_err("(ma_root)");
+ break;
+ case ma_start:
+ pr_err("(ma_start) ");
+ break;
+ case ma_pause:
+ pr_err("(ma_pause) ");
+ break;
+ case ma_overflow:
+ pr_err("(ma_overflow) ");
+ break;
+ case ma_underflow:
+ pr_err("(ma_underflow) ");
+ break;
+ case ma_error:
+ pr_err("(ma_error) ");
+ break;
+ }
+
+ pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
+ mas->index, mas->last);
+ pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
+ mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
+ if (mas->index > mas->last)
+ pr_err("Check index & last\n");
+}
+EXPORT_SYMBOL_GPL(mas_dump);
+
+void mas_wr_dump(const struct ma_wr_state *wr_mas)
+{
+ pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
+ wr_mas->node, wr_mas->r_min, wr_mas->r_max);
+ pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
+ wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
+ wr_mas->end_piv);
+}
+EXPORT_SYMBOL_GPL(mas_wr_dump);
+
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
index 15bd50d92308..0634b428d0cb 100644
--- a/lib/math/Kconfig
+++ b/lib/math/Kconfig
@@ -6,7 +6,12 @@ config CORDIC
calculations are in fixed point. Module will be called cordic.
config PRIME_NUMBERS
- tristate
+ tristate "Simple prime number generator for testing"
+ help
+ This option provides a simple prime number generator for test
+ modules.
+
+ If unsure, say N.
config RATIONAL
- bool
+ tristate
diff --git a/lib/math/Makefile b/lib/math/Makefile
index be6909e943bd..91fcdb0c9efe 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o
+obj-y += div64.o gcd.o lcm.o int_log.o int_pow.o int_sqrt.o reciprocal_div.o
obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
+
+obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 368ca7fd0d82..55a81782e271 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -18,9 +18,11 @@
* or by defining a preprocessor macro in arch/include/asm/div64.h.
*/
+#include <linux/bitops.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/log2.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
@@ -61,12 +63,6 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
#endif
-/**
- * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- * @remainder: 64bit remainder
- */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -87,7 +83,7 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
EXPORT_SYMBOL(div_s64_rem);
#endif
-/**
+/*
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -127,7 +123,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
EXPORT_SYMBOL(div64_u64_rem);
#endif
-/**
+/*
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -161,11 +157,6 @@ u64 div64_u64(u64 dividend, u64 divisor)
EXPORT_SYMBOL(div64_u64);
#endif
-/**
- * div64_s64 - signed 64bit divide with 64bit divisor
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -190,3 +181,45 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+ u64 res = 0, div, rem;
+ int shift;
+
+ /* can a * b overflow ? */
+ if (ilog2(a) + ilog2(b) > 62) {
+ /*
+ * (b * a) / c is equal to
+ *
+ * (b / c) * a +
+ * (b % c) * a / c
+ *
+ * if nothing overflows. Can the 1st multiplication
+ * overflow? Yes, but we do not care: this can only
+ * happen if the end result can't fit in u64 anyway.
+ *
+ * So the code below does
+ *
+ * res = (b / c) * a;
+ * b = b % c;
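+	 *
+	 * A small worked check of the identity (illustrative values only):
+	 * with a = 7, b = 23, c = 5,
+	 *   (23 * 7) / 5 = 161 / 5 = 32
+	 *   (23 / 5) * 7 + ((23 % 5) * 7) / 5 = 28 + 21 / 5 = 28 + 4 = 32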
+ */
+ div = div64_u64_rem(b, c, &rem);
+ res = div * a;
+ b = rem;
+
+ shift = ilog2(a) + ilog2(b) - 62;
+ if (shift > 0) {
+ /* drop precision */
+ b >>= shift;
+ c >>= shift;
+ if (!c)
+ return res;
+ }
+ }
+
+ return res + div64_u64(a * b, c);
+}
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
+#endif
diff --git a/lib/math/int_log.c b/lib/math/int_log.c
new file mode 100644
index 000000000000..8f9da3a2ad39
--- /dev/null
+++ b/lib/math/int_log.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Provides fixed-point logarithm operations.
+ *
+ * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/int_log.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
+
+static const unsigned short logtable[256] = {
+ 0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
+ 0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
+ 0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
+ 0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
+ 0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
+ 0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
+ 0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
+ 0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
+ 0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
+ 0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
+ 0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
+ 0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
+ 0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
+ 0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
+ 0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
+ 0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
+ 0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
+ 0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
+ 0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
+ 0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
+ 0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
+ 0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
+ 0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
+ 0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
+ 0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
+ 0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
+ 0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
+ 0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
+ 0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
+ 0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
+ 0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
+ 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47,
+};
+
+unsigned int intlog2(u32 value)
+{
+ /**
+ * returns: log2(value) * 2^24
+ * wrong result if value = 0 (log2(0) is undefined)
+ */
+ unsigned int msb;
+ unsigned int logentry;
+ unsigned int significand;
+ unsigned int interpolation;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /* first detect the msb (count begins at 0) */
+ msb = fls(value) - 1;
+
+ /**
+	 * now we use a logtable with the following method:
+	 *
+	 * log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
+	 * where x = msb and therefore 1 <= y < 2.
+	 * First, y is determined by shifting the value left so that the
+	 * msb becomes bit 31:
+	 *   0x00231f56 -> 0x8C7D5800
+	 * The result is y * 2^31 -> "significand". Then the highest 9 bits
+	 * are used for a table lookup; the highest bit is discarded because
+	 * it's always set. The highest nine bits in our example are
+	 * 100011000, so we would use the entry 0x18.
+ */
+ significand = value << (31 - msb);
+ logentry = (significand >> 23) % ARRAY_SIZE(logtable);
+
+ /**
+	 * The last step is interpolation, because of the limitations of
+	 * the log table. The error is the part of the significand which
+	 * isn't used for the lookup. We compute the ratio between the
+	 * error and the next table entry and interpolate between the log
+	 * table entry used and the next one. The biggest error possible
+	 * is 0x7fffff (in our example it's 0x7D5800), and the value
+	 * needed for the next table entry is 0x800000, so the
+	 * interpolation is
+	 *   (error / 0x800000) * (logtable_next - logtable_current)
+	 * In the implementation the division is moved to the end for
+	 * better accuracy. There is also an overflow correction if
+	 * logtable_next is 256.
+ */
+ interpolation = ((significand & 0x7fffff) *
+ ((logtable[(logentry + 1) % ARRAY_SIZE(logtable)] -
+ logtable[logentry]) & 0xffff)) >> 15;
+
+ /* now we return the result */
+ return ((msb << 24) + (logtable[logentry] << 8) + interpolation);
+}
+EXPORT_SYMBOL(intlog2);
+
+unsigned int intlog10(u32 value)
+{
+ /**
+ * returns: log10(value) * 2^24
+ * wrong result if value = 0 (log10(0) is undefined)
+ */
+ u64 log;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ log = intlog2(value);
+
+ /**
+ * we use the following method:
+ * log10(x) = log2(x) * log10(2)
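+	 *
+	 * 646456993 is log10(2) * 2^31 (rounded), so multiplying the
+	 * 2^24-scaled log2 by it and shifting right by 31 yields
+	 * log10(value) * 2^24.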
+ */
+
+ return (log * 646456993) >> 31;
+}
+EXPORT_SYMBOL(intlog10);
diff --git a/lib/math/int_pow.c b/lib/math/int_pow.c
index 622fc1ab3c74..0cf426e69bda 100644
--- a/lib/math/int_pow.c
+++ b/lib/math/int_pow.c
@@ -6,7 +6,7 @@
*/
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/types.h>
/**
diff --git a/lib/math/int_sqrt.c b/lib/math/int_sqrt.c
index 30e0f9770f88..a8170bb9142f 100644
--- a/lib/math/int_sqrt.c
+++ b/lib/math/int_sqrt.c
@@ -6,9 +6,10 @@
* square root from Guy L. Steele.
*/
-#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/limits.h>
+#include <linux/math.h>
/**
* int_sqrt - computes the integer square root
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 052f5b727be7..d42cebf7407f 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
@@ -253,7 +253,7 @@ static void dump_primes(void)
if (buf)
bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
rcu_read_unlock();
@@ -273,7 +273,7 @@ static int selftest(unsigned long max)
bool fast = is_prime_number(x);
if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
x, slow ? "yes" : "no", fast ? "yes" : "no");
goto err;
}
@@ -282,14 +282,14 @@ static int selftest(unsigned long max)
continue;
if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
last, x, next_prime_number(last));
goto err;
}
last = x;
}
- pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+ pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
return 0;
err:
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
new file mode 100644
index 000000000000..01611ddff420
--- /dev/null
+++ b/lib/math/rational-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include <linux/rational.h>
+
+struct rational_test_param {
+ unsigned long num, den;
+ unsigned long max_num, max_den;
+ unsigned long exp_num, exp_den;
+
+ const char *name;
+};
+
+static const struct rational_test_param test_parameters[] = {
+ { 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
+ { 34567,100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
+ { 1, 30, 100, 10, 0, 1, "Closest to zero" },
+ { 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
+ { 27,32, 16, 16, 11, 13, "Use convergent" },
+ { 1155, 7735, 255, 255, 33, 221, "Exact answer" },
+ { 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
+ { 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
+};
+
+static void get_desc(const struct rational_test_param *param, char *desc)
+{
+ strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/* Creates function rational_gen_params */
+KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
+
+static void rational_test(struct kunit *test)
+{
+ const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
+ unsigned long n = 0, d = 0;
+
+ rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
+ KUNIT_EXPECT_EQ(test, n, param->exp_num);
+ KUNIT_EXPECT_EQ(test, d, param->exp_den);
+}
+
+static struct kunit_case rational_test_cases[] = {
+ KUNIT_CASE_PARAM(rational_test, rational_gen_params),
+ {}
+};
+
+static struct kunit_suite rational_test_suite = {
+ .name = "rational",
+ .test_cases = rational_test_cases,
+};
+
+kunit_test_suites(&rational_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
index ba7443677c90..ec59d426ea63 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -3,6 +3,7 @@
* rational fractions
*
* Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com>
+ * Copyright (C) 2019 Trent Piepho <tpiepho@gmail.com>
*
* helper functions when coping with rational numbers
*/
@@ -10,6 +11,9 @@
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/minmax.h>
+#include <linux/limits.h>
+#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
@@ -25,7 +29,7 @@
* with the fractional part size described in given_denominator.
*
* for theoretical background, see:
- * http://en.wikipedia.org/wiki/Continued_fraction
+ * https://en.wikipedia.org/wiki/Continued_fraction
*/
void rational_best_approximation(
@@ -33,33 +37,75 @@ void rational_best_approximation(
unsigned long max_numerator, unsigned long max_denominator,
unsigned long *best_numerator, unsigned long *best_denominator)
{
- unsigned long n, d, n0, d0, n1, d1;
+ /* n/d is the starting rational, which is continually
+ * decreased each iteration using the Euclidean algorithm.
+ *
+ * dp is the value of d from the prior iteration.
+ *
+ * n2/d2, n1/d1, and n0/d0 are our successively more accurate
+ * approximations of the rational. They are, respectively,
+ * the current, previous, and two prior iterations of it.
+ *
+ * a is current term of the continued fraction.
+ */
+ unsigned long n, d, n0, d0, n1, d1, n2, d2;
n = given_numerator;
d = given_denominator;
n0 = d1 = 0;
n1 = d0 = 1;
+
for (;;) {
- unsigned long t, a;
- if ((n1 > max_numerator) || (d1 > max_denominator)) {
- n1 = n0;
- d1 = d0;
- break;
- }
+ unsigned long dp, a;
+
if (d == 0)
break;
- t = d;
+ /* Find next term in continued fraction, 'a', via
+ * Euclidean algorithm.
+ */
+ dp = d;
a = n / d;
d = n % d;
- n = t;
- t = n0 + a * n1;
+ n = dp;
+
+ /* Calculate the current rational approximation (aka
+ * convergent), n2/d2, using the term just found and
+ * the two prior approximations.
+ */
+ n2 = n0 + a * n1;
+ d2 = d0 + a * d1;
+
+ /* If the current convergent exceeds the maxes, then
+ * return either the previous convergent or the
+ * largest semi-convergent, the final term of which is
+ * found below as 't'.
+ */
+ if ((n2 > max_numerator) || (d2 > max_denominator)) {
+ unsigned long t = ULONG_MAX;
+
+ if (d1)
+ t = (max_denominator - d0) / d1;
+ if (n1)
+ t = min(t, (max_numerator - n0) / n1);
+
+ /* This tests if the semi-convergent is closer than the previous
+ * convergent. If d1 is zero there is no previous convergent as this
+ * is the 1st iteration, so always choose the semi-convergent.
+ */
+ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ n1 = n0 + t * n1;
+ d1 = d0 + t * d1;
+ }
+ break;
+ }
n0 = n1;
- n1 = t;
- t = d0 + a * d1;
+ n1 = n2;
d0 = d1;
- d1 = t;
+ d1 = d2;
}
*best_numerator = n1;
*best_denominator = d1;
}
EXPORT_SYMBOL(rational_best_approximation);
+
+MODULE_LICENSE("GPL v2");
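The comments above describe a convergent walk plus a semi-convergent refinement. The short user-space sketch below (illustration only, not kernel code) follows just the convergents and stops at the previous one when a limit is exceeded, which is enough to reproduce the "Exact answer" (1155/7735 -> 33/221) and "Use convergent" (27/32 -> 11/13) rows of the KUnit table above:

#include <stdio.h>

static void best_approx(unsigned long num, unsigned long den,
			unsigned long max_num, unsigned long max_den,
			unsigned long *best_num, unsigned long *best_den)
{
	unsigned long n = num, d = den;
	unsigned long n0 = 0, d0 = 1, n1 = 1, d1 = 0;

	while (d != 0) {
		/* Next continued-fraction term and next convergent n2/d2. */
		unsigned long a = n / d, r = n % d;
		unsigned long n2 = n0 + a * n1, d2 = d0 + a * d1;

		/* Limits exceeded: keep the previous convergent n1/d1. */
		if (n2 > max_num || d2 > max_den)
			break;
		n = d;
		d = r;
		n0 = n1;
		n1 = n2;
		d0 = d1;
		d1 = d2;
	}
	*best_num = n1;
	*best_den = d1;
}

int main(void)
{
	unsigned long n, d;

	best_approx(1155, 7735, 255, 255, &n, &d);
	printf("%lu/%lu\n", n, d);	/* 33/221 */
	best_approx(27, 32, 16, 16, &n, &d);
	printf("%lu/%lu\n", n, d);	/* 11/13 */
	return 0;
}

For the semi-convergent rows the extra step in the patch matters. Taking the "Semiconvergent, numerator limit" row (87/32 bounded by 70/32): the convergents are 2/1, 3/1, 8/3, 11/4 and 19/7, and the next one is 87/32 itself, which exceeds the numerator limit, so the sketch above would stop at 19/7. The patched kernel function instead computes t = min((32 - 4) / 7, (70 - 11) / 19) = 3 and, since 2 * t = 6 > a = 4, returns the semi-convergent (11 + 3 * 19) / (4 + 3 * 7) = 68/25, matching the expected values in the test table.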
diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c
index bf043258fa00..6cb4adbb81d2 100644
--- a/lib/math/reciprocal_div.c
+++ b/lib/math/reciprocal_div.c
@@ -1,9 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <asm/div64.h>
-#include <linux/reciprocal_div.h>
#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+
+#include <linux/reciprocal_div.h>
/*
* For a description of the algorithm please have a look at
diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c
new file mode 100644
index 000000000000..c15edd688dd2
--- /dev/null
+++ b/lib/math/test_div64.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Maciej W. Rozycki
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#include <asm/div64.h>
+
+#define TEST_DIV64_N_ITER 1024
+
+static const u64 test_div64_dividends[] = {
+ 0x00000000ab275080,
+ 0x0000000fe73c1959,
+ 0x000000e54c0a74b1,
+ 0x00000d4398ff1ef9,
+ 0x0000a18c2ee1c097,
+ 0x00079fb80b072e4a,
+ 0x0072db27380dd689,
+ 0x0842f488162e2284,
+ 0xf66745411d8ab063,
+};
+#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends)
+
+#define TEST_DIV64_DIVISOR_0 0x00000009
+#define TEST_DIV64_DIVISOR_1 0x0000007c
+#define TEST_DIV64_DIVISOR_2 0x00000204
+#define TEST_DIV64_DIVISOR_3 0x0000cb5b
+#define TEST_DIV64_DIVISOR_4 0x00010000
+#define TEST_DIV64_DIVISOR_5 0x0008a880
+#define TEST_DIV64_DIVISOR_6 0x003fd3ae
+#define TEST_DIV64_DIVISOR_7 0x0b658fac
+#define TEST_DIV64_DIVISOR_8 0xdc08b349
+
+static const u32 test_div64_divisors[] = {
+ TEST_DIV64_DIVISOR_0,
+ TEST_DIV64_DIVISOR_1,
+ TEST_DIV64_DIVISOR_2,
+ TEST_DIV64_DIVISOR_3,
+ TEST_DIV64_DIVISOR_4,
+ TEST_DIV64_DIVISOR_5,
+ TEST_DIV64_DIVISOR_6,
+ TEST_DIV64_DIVISOR_7,
+ TEST_DIV64_DIVISOR_8,
+};
+#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors)
+
+static const struct {
+ u64 quotient;
+ u32 remainder;
+} test_div64_results[SIZE_DIV64_DIVIDENDS][SIZE_DIV64_DIVISORS] = {
+ {
+ { 0x0000000013045e47, 0x00000001 },
+ { 0x000000000161596c, 0x00000030 },
+ { 0x000000000054e9d4, 0x00000130 },
+ { 0x000000000000d776, 0x0000278e },
+ { 0x000000000000ab27, 0x00005080 },
+ { 0x00000000000013c4, 0x0004ce80 },
+ { 0x00000000000002ae, 0x001e143c },
+ { 0x000000000000000f, 0x0033e56c },
+ { 0x0000000000000000, 0xab275080 },
+ }, {
+ { 0x00000001c45c02d1, 0x00000000 },
+ { 0x0000000020d5213c, 0x00000049 },
+ { 0x0000000007e3d65f, 0x000001dd },
+ { 0x0000000000140531, 0x000065ee },
+ { 0x00000000000fe73c, 0x00001959 },
+ { 0x000000000001d637, 0x0004e5d9 },
+ { 0x0000000000003fc9, 0x000713bb },
+ { 0x0000000000000165, 0x029abe7d },
+ { 0x0000000000000012, 0x6e9f7e37 },
+ }, {
+ { 0x000000197a3a0cf7, 0x00000002 },
+ { 0x00000001d9632e5c, 0x00000021 },
+ { 0x0000000071c28039, 0x000001cd },
+ { 0x000000000120a844, 0x0000b885 },
+ { 0x0000000000e54c0a, 0x000074b1 },
+ { 0x00000000001a7bb3, 0x00072331 },
+ { 0x00000000000397ad, 0x0002c61b },
+ { 0x000000000000141e, 0x06ea2e89 },
+ { 0x000000000000010a, 0xab002ad7 },
+ }, {
+ { 0x0000017949e37538, 0x00000001 },
+ { 0x0000001b62441f37, 0x00000055 },
+ { 0x0000000694a3391d, 0x00000085 },
+ { 0x0000000010b2a5d2, 0x0000a753 },
+ { 0x000000000d4398ff, 0x00001ef9 },
+ { 0x0000000001882ec6, 0x0005cbf9 },
+ { 0x000000000035333b, 0x0017abdf },
+ { 0x00000000000129f1, 0x0ab4520d },
+ { 0x0000000000000f6e, 0x8ac0ce9b },
+ }, {
+ { 0x000011f321a74e49, 0x00000006 },
+ { 0x0000014d8481d211, 0x0000005b },
+ { 0x0000005025cbd92d, 0x000001e3 },
+ { 0x00000000cb5e71e3, 0x000043e6 },
+ { 0x00000000a18c2ee1, 0x0000c097 },
+ { 0x0000000012a88828, 0x00036c97 },
+ { 0x000000000287f16f, 0x002c2a25 },
+ { 0x00000000000e2cc7, 0x02d581e3 },
+ { 0x000000000000bbf4, 0x1ba08c03 },
+ }, {
+ { 0x0000d8db8f72935d, 0x00000005 },
+ { 0x00000fbd5aed7a2e, 0x00000002 },
+ { 0x000003c84b6ea64a, 0x00000122 },
+ { 0x0000000998fa8829, 0x000044b7 },
+ { 0x000000079fb80b07, 0x00002e4a },
+ { 0x00000000e16b20fa, 0x0002a14a },
+ { 0x000000001e940d22, 0x00353b2e },
+ { 0x0000000000ab40ac, 0x06fba6ba },
+ { 0x000000000008debd, 0x72d98365 },
+ }, {
+ { 0x000cc3045b8fc281, 0x00000000 },
+ { 0x0000ed1f48b5c9fc, 0x00000079 },
+ { 0x000038fb9c63406a, 0x000000e1 },
+ { 0x000000909705b825, 0x00000a62 },
+ { 0x00000072db27380d, 0x0000d689 },
+ { 0x0000000d43fce827, 0x00082b09 },
+ { 0x00000001ccaba11a, 0x0037e8dd },
+ { 0x000000000a13f729, 0x0566dffd },
+ { 0x000000000085a14b, 0x23d36726 },
+ }, {
+ { 0x00eafeb9c993592b, 0x00000001 },
+ { 0x00110e5befa9a991, 0x00000048 },
+ { 0x00041947b4a1d36a, 0x000000dc },
+ { 0x00000a6679327311, 0x0000c079 },
+ { 0x00000842f488162e, 0x00002284 },
+ { 0x000000f4459740fc, 0x00084484 },
+ { 0x0000002122c47bf9, 0x002ca446 },
+ { 0x00000000b9936290, 0x004979c4 },
+ { 0x00000000099ca89d, 0x9db446bf },
+ }, {
+ { 0x1b60cece589da1d2, 0x00000001 },
+ { 0x01fcb42be1453f5b, 0x0000004f },
+ { 0x007a3f2457df0749, 0x0000013f },
+ { 0x0001363130e3ec7b, 0x000017aa },
+ { 0x0000f66745411d8a, 0x0000b063 },
+ { 0x00001c757dfab350, 0x00048863 },
+ { 0x000003dc4979c652, 0x00224ea7 },
+ { 0x000000159edc3144, 0x06409ab3 },
+ { 0x000000011eadfee3, 0xa99c48a8 },
+ },
+};
+
+static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j)
+{
+ return (quotient == test_div64_results[i][j].quotient &&
+ remainder == test_div64_results[i][j].remainder);
+}
+
+/*
+ * This needs to be a macro, because we don't want to rely on the compiler
+ * to do constant propagation, and `do_div' may take a different path for
+ * constants, so we do want to verify that as well.
+ */
+#define test_div64_one(dividend, divisor, i, j) ({ \
+ bool result = true; \
+ u64 quotient; \
+ u32 remainder; \
+ \
+ quotient = dividend; \
+ remainder = do_div(quotient, divisor); \
+ if (!test_div64_verify(quotient, remainder, i, j)) { \
+ pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \
+ dividend, divisor, quotient, remainder); \
+ pr_err("ERROR: expected value => %016llx,%08x\n",\
+ test_div64_results[i][j].quotient, \
+ test_div64_results[i][j].remainder); \
+ result = false; \
+ } \
+ result; \
+})
+
+/*
+ * Run calculation for the same divisor value expressed as a constant
+ * and as a variable, so as to verify the implementation for both cases
+ * should they be handled by different code execution paths.
+ */
+static bool __init test_div64(void)
+{
+ u64 dividend;
+ int i, j;
+
+ for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) {
+ dividend = test_div64_dividends[i];
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8))
+ return false;
+ for (j = 0; j < SIZE_DIV64_DIVISORS; j++) {
+ if (!test_div64_one(dividend, test_div64_divisors[j],
+ i, j))
+ return false;
+ }
+ }
+ return true;
+}
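One entry of the expected-results table can be checked independently of do_div(); the user-space sketch below (not part of the module) divides the first dividend by the first divisor and should print the quotient/remainder pair stored at test_div64_results[0][0]:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dividend = 0x00000000ab275080ULL;
	uint32_t divisor = 0x00000009;
	uint64_t quotient = dividend / divisor;			/* expect 0x0000000013045e47 */
	uint32_t remainder = (uint32_t)(dividend % divisor);	/* expect 0x00000001 */

	printf("%016llx,%08x\n", (unsigned long long)quotient, remainder);
	return 0;
}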
+
+static int __init test_div64_init(void)
+{
+ struct timespec64 ts, ts0, ts1;
+ int i;
+
+ pr_info("Starting 64bit/32bit division and modulo test\n");
+ ktime_get_ts64(&ts0);
+
+ for (i = 0; i < TEST_DIV64_N_ITER; i++)
+ if (!test_div64())
+ break;
+
+ ktime_get_ts64(&ts1);
+ ts = timespec64_sub(ts1, ts0);
+ pr_info("Completed 64bit/32bit division and modulo test, "
+ "%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec);
+
+ return 0;
+}
+
+static void __exit test_div64_exit(void)
+{
+}
+
+module_init(test_div64_init);
+module_exit(test_div64_exit);
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("64bit/32bit division and modulo test module");
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
new file mode 100644
index 000000000000..440aee705ccc
--- /dev/null
+++ b/lib/memcpy_kunit.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcpy(), memmove(), and memset().
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+struct some_bytes {
+ union {
+ u8 data[32];
+ struct {
+ u32 one;
+ u16 two;
+ u8 three;
+ /* 1 byte hole */
+ u32 four[4];
+ };
+ };
+};
+
+#define check(instance, v) do { \
+ BUILD_BUG_ON(sizeof(instance.data) != 32); \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
+ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
+ "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
+ __LINE__, #instance, v, i, instance.data[i]); \
+ } \
+} while (0)
+
+#define compare(name, one, two) do { \
+ BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
+ for (size_t i = 0; i < sizeof(one); i++) { \
+ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
+ "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
+ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
+ } \
+ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
+} while (0)
+
+static void memcpy_test(struct kunit *test)
+{
+#define TEST_OP "memcpy"
+ struct some_bytes control = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x20);
+ check(zero, 0);
+ compare("static initializers", dest, zero);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memcpy(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memcpy(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 1;
+ memcpy(ptr++, zero.data, count++);
+ ptr += 8;
+ memcpy(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, three);
+#undef TEST_OP
+}
+
+static unsigned char larger_array [2048];
+
+static void memmove_test(struct kunit *test)
+{
+#define TEST_OP "memmove"
+ struct some_bytes control = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes five = {
+ .data = { 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x00, 0x00, 0x00, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap = {
+ .data = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap_expected = {
+ .data = { 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x99);
+ check(zero, 0);
+ compare("static initializers", zero, dest);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memmove(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memmove(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 2;
+ memmove(ptr++, zero.data, count++);
+ ptr += 9;
+ memmove(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, five);
+
+ /* Verify overlapping overwrite is correct. */
+ ptr = &overlap.data[2];
+ memmove(ptr, overlap.data, 5);
+ compare("overlapping write", overlap, overlap_expected);
+
+ /* Verify larger overlapping moves. */
+ larger_array[256] = 0xAAu;
+ /*
+ * Test a backwards overlapping memmove first. 256 and 1024 are
+ * important for i386 to use rep movsl.
+ */
+ memmove(larger_array, larger_array + 256, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1));
+ /* Test a forwards overlapping memmove. */
+ larger_array[0] = 0xBBu;
+ memmove(larger_array + 256, larger_array, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
+ KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
+#undef TEST_OP
+}
+
+static void memset_test(struct kunit *test)
+{
+#define TEST_OP "memset"
+ struct some_bytes control = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes complete = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ };
+ struct some_bytes middle = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x60, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x61, 0x61, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes after = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ },
+ };
+ struct some_bytes startat = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ },
+ };
+ struct some_bytes dest = { };
+ int count, value;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x30);
+ check(dest, 0);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memset(dest.data, 0xff, sizeof(dest.data));
+ compare("complete overwrite", dest, complete);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memset(dest.data + 4, 0x31, 16);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ value = 0x60;
+ count = 1;
+ memset(ptr++, value++, count++);
+ ptr += 8;
+ memset(ptr++, value++, count++);
+ compare("argument side-effects", dest, three);
+
+ /* Verify memset_after() */
+ dest = control;
+ memset_after(&dest, 0x72, three);
+ compare("memset_after()", dest, after);
+
+ /* Verify memset_startat() */
+ dest = control;
+ memset_startat(&dest, 0x79, four);
+ compare("memset_startat()", dest, startat);
+#undef TEST_OP
+}
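The memset_after() and memset_startat() checks above encode the helpers' expected effect on struct some_bytes: 'three' is the byte at offset 6 and 'four' starts at offset 8 (assuming the usual 4-byte u32 / 2-byte u16 layout), so the 'after' and 'startat' buffers correspond to plain memset() calls starting at offsets 7 and 8. A standalone user-space sketch of that equivalence (struct name and the equivalence itself are illustrative assumptions, not part of the patch):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct some_bytes_demo {
	unsigned int one;
	unsigned short two;
	unsigned char three;
	/* 1 byte hole */
	unsigned int four[4];
};

int main(void)
{
	unsigned char data[32];

	memset(data, 0x30, sizeof(data));
	/* memset_after(&dest, 0x72, three): everything past 'three' */
	memset(data + offsetof(struct some_bytes_demo, three) + 1, 0x72,
	       sizeof(data) - offsetof(struct some_bytes_demo, three) - 1);
	printf("after:   data[6] = 0x%02x, data[7] = 0x%02x\n",
	       data[6], data[7]);	/* 0x30, 0x72 */

	memset(data, 0x30, sizeof(data));
	/* memset_startat(&dest, 0x79, four): from 'four' onwards */
	memset(data + offsetof(struct some_bytes_demo, four), 0x79,
	       sizeof(data) - offsetof(struct some_bytes_demo, four));
	printf("startat: data[7] = 0x%02x, data[8] = 0x%02x\n",
	       data[7], data[8]);	/* 0x30, 0x79 */

	return 0;
}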
+
+static u8 large_src[1024];
+static u8 large_dst[2048];
+static const u8 large_zero[2048];
+
+static void set_random_nonzero(struct kunit *test, u8 *byte)
+{
+ int failed_rng = 0;
+
+ while (*byte == 0) {
+ get_random_bytes(byte, 1);
+ KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
+ "Is the RNG broken?");
+ }
+}
+
+static void init_large(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
+ kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
+
+ /* Get many bit patterns. */
+ get_random_bytes(large_src, ARRAY_SIZE(large_src));
+
+ /* Make sure we have non-zero edges. */
+ set_random_nonzero(test, &large_src[0]);
+ set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
+
+ /* Explicitly zero the entire destination. */
+ memset(large_dst, 0, ARRAY_SIZE(large_dst));
+}
+
+/*
+ * Instead of an indirect function call for "copy" or a giant macro,
+ * use a bool to pick memcpy or memmove.
+ */
+static void copy_large_test(struct kunit *test, bool use_memmove)
+{
+ init_large(test);
+
+ /* Copy a growing number of non-overlapping bytes ... */
+ for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
+ /* Over a shifting destination window ... */
+ for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
+ int right_zero_pos = offset + bytes;
+ int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ /* Copy! */
+ if (use_memmove)
+ memmove(large_dst + offset, large_src, bytes);
+ else
+ memcpy(large_dst + offset, large_src, bytes);
+
+ /* Did we touch anything before the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst, large_zero, offset), 0,
+ "with size %d at offset %d", bytes, offset);
+ /* Did we touch anything after the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Are we byte-for-byte exact across the copy? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst + offset, large_src, bytes), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Zero out what we copied for the next cycle. */
+ memset(large_dst + offset, 0, bytes);
+ }
+ /* Avoid stall warnings if this loop gets slow. */
+ cond_resched();
+ }
+}
+
+static void memcpy_large_test(struct kunit *test)
+{
+ copy_large_test(test, false);
+}
+
+static void memmove_large_test(struct kunit *test)
+{
+ copy_large_test(test, true);
+}
+
+/*
+ * On the assumption that boundary conditions are going to be the most
+ * sensitive, instead of taking a full step (inc) each iteration,
+ * take single index steps for at least the first "inc"-many indexes
+ * from the "start" and at least the last "inc"-many indexes before
+ * the "end". When in the middle, take full "inc"-wide steps. For
+ * example, calling next_step(idx, 1, 15, 3) with idx starting at 0
+ * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
+ */
+static int next_step(int idx, int start, int end, int inc)
+{
+ start += inc;
+ end -= inc;
+
+ if (idx < start || idx + inc > end)
+ inc = 1;
+ return idx + inc;
+}
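As a quick check of the pattern quoted in the comment above, a standalone user-space loop (not part of the test module) over next_step(idx, 1, 15, 3), starting from 0, prints exactly "1 2 3 4 7 10 11 12 13 14 15":

#include <stdio.h>

static int next_step(int idx, int start, int end, int inc)
{
	start += inc;
	end -= inc;

	if (idx < start || idx + inc > end)
		inc = 1;
	return idx + inc;
}

int main(void)
{
	for (int idx = next_step(0, 1, 15, 3); idx <= 15;
	     idx = next_step(idx, 1, 15, 3))
		printf("%d ", idx);
	printf("\n");
	return 0;
}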
+
+static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
+{
+ int left_zero_pos, left_zero_size;
+ int right_zero_pos, right_zero_size;
+ int src_pos, src_orig_pos, src_size;
+ int pos;
+
+ /* Place the source in the destination buffer. */
+ memcpy(&large_dst[s_off], large_src, bytes);
+
+ /* Copy to destination offset. */
+ memmove(&large_dst[d_off], &large_dst[s_off], bytes);
+
+ /* Make sure destination entirely matches. */
+ KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Calculate the expected zero spans. */
+ if (s_off < d_off) {
+ left_zero_pos = 0;
+ left_zero_size = s_off;
+
+ right_zero_pos = d_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = s_off;
+ src_orig_pos = 0;
+ src_size = d_off - s_off;
+ } else {
+ left_zero_pos = 0;
+ left_zero_size = d_off;
+
+ right_zero_pos = s_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = d_off + bytes;
+ src_orig_pos = src_pos - s_off;
+ src_size = right_zero_pos - src_pos;
+ }
+
+ /* Check non-overlapping source is unchanged.*/
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Check leading buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+ /* Check trailing buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Zero out everything not already zeroed.*/
+ pos = left_zero_pos + left_zero_size;
+ memset(&large_dst[pos], 0, right_zero_pos - pos);
+}
+
+static void memmove_overlap_test(struct kunit *test)
+{
+ /*
+ * Running all possible offset and overlap combinations takes a
+ * very long time. Instead, only check up to 128 bytes offset
+ * into the destination buffer (which should result in crossing
+ * cachelines), with a step size of 1 through 7 to try to skip some
+ * redundancy.
+ */
+ static const int offset_max = 128; /* less than ARRAY_SIZE(large_src); */
+ static const int bytes_step = 7;
+ static const int window_step = 7;
+
+ static const int bytes_start = 1;
+ static const int bytes_end = ARRAY_SIZE(large_src) + 1;
+
+ init_large(test);
+
+ /* Copy a growing number of overlapping bytes ... */
+ for (int bytes = bytes_start; bytes < bytes_end;
+ bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
+
+ /* Over a shifting destination window ... */
+ for (int d_off = 0; d_off < offset_max; d_off++) {
+ int s_start = max(d_off - bytes, 0);
+ int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
+
+ /* Over a shifting source window ... */
+ for (int s_off = s_start; s_off < s_end;
+ s_off = next_step(s_off, s_start, s_end, window_step))
+ inner_loop(test, bytes, d_off, s_off);
+
+ /* Avoid stall warnings. */
+ cond_resched();
+ }
+ }
+}
+
+static void strtomem_test(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static struct kunit_case memcpy_test_cases[] = {
+ KUNIT_CASE(memset_test),
+ KUNIT_CASE(memcpy_test),
+ KUNIT_CASE_SLOW(memcpy_large_test),
+ KUNIT_CASE_SLOW(memmove_test),
+ KUNIT_CASE_SLOW(memmove_large_test),
+ KUNIT_CASE_SLOW(memmove_overlap_test),
+ KUNIT_CASE(strtomem_test),
+ {}
+};
+
+static struct kunit_suite memcpy_test_suite = {
+ .name = "memcpy",
+ .test_cases = memcpy_test_cases,
+};
+
+kunit_test_suite(memcpy_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/memregion.c b/lib/memregion.c
new file mode 100644
index 000000000000..be5cfa5a3b57
--- /dev/null
+++ b/lib/memregion.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* identifiers for device / performance-differentiated memory regions */
+#include <linux/idr.h>
+#include <linux/types.h>
+#include <linux/memregion.h>
+
+static DEFINE_IDA(memregion_ids);
+
+int memregion_alloc(gfp_t gfp)
+{
+ return ida_alloc(&memregion_ids, gfp);
+}
+EXPORT_SYMBOL(memregion_alloc);
+
+void memregion_free(int id)
+{
+ ida_free(&memregion_ids, id);
+}
+EXPORT_SYMBOL(memregion_free);
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
deleted file mode 100644
index 503537e08436..000000000000
--- a/lib/mpi/mpi-bit.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* mpi-bit.c - MPI bit level fucntions
- * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-#include "longlong.h"
-
-#define A_LIMB_1 ((mpi_limb_t) 1)
-
-/****************
- * Sometimes we have MSL (most significant limbs) which are 0;
- * this is for some reasons not good, so this function removes them.
- */
-void mpi_normalize(MPI a)
-{
- for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
- ;
-}
-
-/****************
- * Return the number of bits in A.
- */
-unsigned mpi_get_nbits(MPI a)
-{
- unsigned n;
-
- mpi_normalize(a);
-
- if (a->nlimbs) {
- mpi_limb_t alimb = a->d[a->nlimbs - 1];
- if (alimb)
- n = count_leading_zeros(alimb);
- else
- n = BITS_PER_MPI_LIMB;
- n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
- } else
- n = 0;
- return n;
-}
-EXPORT_SYMBOL_GPL(mpi_get_nbits);
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
deleted file mode 100644
index 913a519eb005..000000000000
--- a/lib/mpi/mpih-div.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* mpihelp-div.c - MPI helper functions
- * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
- * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * Note: This code is heavily based on the GNU MP Library.
- * Actually it's the same code with only minor changes in the
- * way the data is stored; this is to support the abstraction
- * of an optional secure memory allocation which may be used
- * to avoid revealing of sensitive data due to paging etc.
- * The GNU MP Library itself is published under the LGPL;
- * however I decided to publish this code under the plain GPL.
- */
-
-#include "mpi-internal.h"
-#include "longlong.h"
-
-#ifndef UMUL_TIME
-#define UMUL_TIME 1
-#endif
-#ifndef UDIV_TIME
-#define UDIV_TIME UMUL_TIME
-#endif
-
-/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
- * the NSIZE-DSIZE least significant quotient limbs at QP
- * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
- * non-zero, generate that many fraction bits and append them after the
- * other quotient limbs.
- * Return the most significant limb of the quotient, this is always 0 or 1.
- *
- * Preconditions:
- * 0. NSIZE >= DSIZE.
- * 1. The most significant bit of the divisor must be set.
- * 2. QP must either not overlap with the input operands at all, or
- * QP + DSIZE >= NP must hold true. (This means that it's
- * possible to put the quotient in the high part of NUM, right after the
- * remainder in NUM.
- * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
- */
-
-mpi_limb_t
-mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
- mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
-{
- mpi_limb_t most_significant_q_limb = 0;
-
- switch (dsize) {
- case 0:
- /* We are asked to divide by zero, so go ahead and do it! (To make
- the compiler not remove this statement, return the value.) */
- /*
- * existing clients of this function have been modified
- * not to call it with dsize == 0, so this should not happen
- */
- return 1 / dsize;
-
- case 1:
- {
- mpi_size_t i;
- mpi_limb_t n1;
- mpi_limb_t d;
-
- d = dp[0];
- n1 = np[nsize - 1];
-
- if (n1 >= d) {
- n1 -= d;
- most_significant_q_limb = 1;
- }
-
- qp += qextra_limbs;
- for (i = nsize - 2; i >= 0; i--)
- udiv_qrnnd(qp[i], n1, n1, np[i], d);
- qp -= qextra_limbs;
-
- for (i = qextra_limbs - 1; i >= 0; i--)
- udiv_qrnnd(qp[i], n1, n1, 0, d);
-
- np[0] = n1;
- }
- break;
-
- case 2:
- {
- mpi_size_t i;
- mpi_limb_t n1, n0, n2;
- mpi_limb_t d1, d0;
-
- np += nsize - 2;
- d1 = dp[1];
- d0 = dp[0];
- n1 = np[1];
- n0 = np[0];
-
- if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
- sub_ddmmss(n1, n0, n1, n0, d1, d0);
- most_significant_q_limb = 1;
- }
-
- for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
- mpi_limb_t q;
- mpi_limb_t r;
-
- if (i >= qextra_limbs)
- np--;
- else
- np[0] = 0;
-
- if (n1 == d1) {
- /* Q should be either 111..111 or 111..110. Need special
- * treatment of this rare case as normal division would
- * give overflow. */
- q = ~(mpi_limb_t) 0;
-
- r = n0 + d1;
- if (r < d1) { /* Carry in the addition? */
- add_ssaaaa(n1, n0, r - d0,
- np[0], 0, d0);
- qp[i] = q;
- continue;
- }
- n1 = d0 - (d0 != 0 ? 1 : 0);
- n0 = -d0;
- } else {
- udiv_qrnnd(q, r, n1, n0, d1);
- umul_ppmm(n1, n0, d0, q);
- }
-
- n2 = np[0];
-q_test:
- if (n1 > r || (n1 == r && n0 > n2)) {
- /* The estimated Q was too large. */
- q--;
- sub_ddmmss(n1, n0, n1, n0, 0, d0);
- r += d1;
- if (r >= d1) /* If not carry, test Q again. */
- goto q_test;
- }
-
- qp[i] = q;
- sub_ddmmss(n1, n0, r, n2, n1, n0);
- }
- np[1] = n1;
- np[0] = n0;
- }
- break;
-
- default:
- {
- mpi_size_t i;
- mpi_limb_t dX, d1, n0;
-
- np += nsize - dsize;
- dX = dp[dsize - 1];
- d1 = dp[dsize - 2];
- n0 = np[dsize - 1];
-
- if (n0 >= dX) {
- if (n0 > dX
- || mpihelp_cmp(np, dp, dsize - 1) >= 0) {
- mpihelp_sub_n(np, np, dp, dsize);
- n0 = np[dsize - 1];
- most_significant_q_limb = 1;
- }
- }
-
- for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
- mpi_limb_t q;
- mpi_limb_t n1, n2;
- mpi_limb_t cy_limb;
-
- if (i >= qextra_limbs) {
- np--;
- n2 = np[dsize];
- } else {
- n2 = np[dsize - 1];
- MPN_COPY_DECR(np + 1, np, dsize - 1);
- np[0] = 0;
- }
-
- if (n0 == dX) {
- /* This might over-estimate q, but it's probably not worth
- * the extra code here to find out. */
- q = ~(mpi_limb_t) 0;
- } else {
- mpi_limb_t r;
-
- udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
- umul_ppmm(n1, n0, d1, q);
-
- while (n1 > r
- || (n1 == r
- && n0 > np[dsize - 2])) {
- q--;
- r += dX;
- if (r < dX) /* I.e. "carry in previous addition?" */
- break;
- n1 -= n0 < d1;
- n0 -= d1;
- }
- }
-
- /* Possible optimization: We already have (q * n0) and (1 * n1)
- * after the calculation of q. Taking advantage of that, we
- * could make this loop make two iterations less. */
- cy_limb = mpihelp_submul_1(np, dp, dsize, q);
-
- if (n2 != cy_limb) {
- mpihelp_add_n(np, np, dp, dsize);
- q--;
- }
-
- qp[i] = q;
- n0 = np[dsize - 1];
- }
- }
- }
-
- return most_significant_q_limb;
-}
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
deleted file mode 100644
index 20ed0f766787..000000000000
--- a/lib/mpi/mpiutil.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/* mpiutil.ac - Utility functions for MPI
- * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-
-/****************
- * Note: It was a bad idea to use the number of limbs to allocate
- * because on a alpha the limbs are large but we normally need
- * integers of n bits - So we should chnage this to bits (or bytes).
- *
- * But mpi_alloc is used in a lot of places :-)
- */
-MPI mpi_alloc(unsigned nlimbs)
-{
- MPI a;
-
- a = kmalloc(sizeof *a, GFP_KERNEL);
- if (!a)
- return a;
-
- if (nlimbs) {
- a->d = mpi_alloc_limb_space(nlimbs);
- if (!a->d) {
- kfree(a);
- return NULL;
- }
- } else {
- a->d = NULL;
- }
-
- a->alloced = nlimbs;
- a->nlimbs = 0;
- a->sign = 0;
- a->flags = 0;
- a->nbits = 0;
- return a;
-}
-EXPORT_SYMBOL_GPL(mpi_alloc);
-
-mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
-{
- size_t len = nlimbs * sizeof(mpi_limb_t);
-
- if (!len)
- return NULL;
-
- return kmalloc(len, GFP_KERNEL);
-}
-
-void mpi_free_limb_space(mpi_ptr_t a)
-{
- if (!a)
- return;
-
- kzfree(a);
-}
-
-void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
-{
- mpi_free_limb_space(a->d);
- a->d = ap;
- a->alloced = nlimbs;
-}
-
-/****************
- * Resize the array of A to NLIMBS. the additional space is cleared
- * (set to 0) [done by m_realloc()]
- */
-int mpi_resize(MPI a, unsigned nlimbs)
-{
- void *p;
-
- if (nlimbs <= a->alloced)
- return 0; /* no need to do it */
-
- if (a->d) {
- p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
- kzfree(a->d);
- a->d = p;
- } else {
- a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
- if (!a->d)
- return -ENOMEM;
- }
- a->alloced = nlimbs;
- return 0;
-}
-
-void mpi_free(MPI a)
-{
- if (!a)
- return;
-
- if (a->flags & 4)
- kzfree(a->d);
- else
- mpi_free_limb_space(a->d);
-
- if (a->flags & ~7)
- pr_info("invalid flag value in mpi\n");
- kfree(a);
-}
-EXPORT_SYMBOL_GPL(mpi_free);
-
-MODULE_DESCRIPTION("Multiprecision maths library");
-MODULE_LICENSE("GPL");
diff --git a/lib/net_utils.c b/lib/net_utils.c
index af525353395d..42bb0473fb22 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -2,14 +2,16 @@
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/ctype.h>
-#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/hex.h>
bool mac_pton(const char *s, u8 *mac)
{
+ size_t maxlen = 3 * ETH_ALEN - 1;
int i;
/* XX:XX:XX:XX:XX:XX */
- if (strlen(s) < 3 * ETH_ALEN - 1)
+ if (strnlen(s, maxlen) < maxlen)
return false;
/* Don't dirty result unless string is valid MAC. */
diff --git a/lib/nlattr.c b/lib/nlattr.c
index cace9b307781..be9c576b6e2d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -29,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@@ -42,10 +45,26 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
+/*
+ * Nested policies might refer back to the original
+ * policy in some cases, and userspace could try to
+ * abuse that and recurse by nesting in the right
+ * ways. Limit recursion to avoid this problem.
+ */
+#define MAX_POLICY_RECURSION_DEPTH 10
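The construct this guards against is a policy that, directly or through other policies, nests back into itself. A hypothetical sketch (attribute names invented, using the nested_policy/len fields exactly as validate_nla() consumes them below):

/* Without a depth limit, a maliciously nested attribute stream validated
 * against a self-referencing policy like this could recurse without bound.
 */
enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_NESTED,
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_NESTED] = { .type = NLA_NESTED,
				  .nested_policy = example_policy,
				  .len = EXAMPLE_ATTR_MAX },
};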
+
+static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb, unsigned int depth);
+
static int validate_nla_bitfield32(const struct nlattr *nla,
- const u32 *valid_flags_mask)
+ const u32 valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
@@ -53,11 +72,11 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
return -EINVAL;
/*disallow invalid bit selector */
- if (bf->selector & ~*valid_flags_mask)
+ if (bf->selector & ~valid_flags_mask)
return -EINVAL;
/*disallow invalid bit values */
- if (bf->value & ~*valid_flags_mask)
+ if (bf->value & ~valid_flags_mask)
return -EINVAL;
/*disallow valid bit values that are not selected*/
@@ -70,7 +89,7 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack,
- unsigned int validate)
+ unsigned int validate, unsigned int depth)
{
const struct nlattr *entry;
int rem;
@@ -82,13 +101,14 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
continue;
if (nla_len(entry) < NLA_HDRLEN) {
- NL_SET_ERR_MSG_ATTR(extack, entry,
- "Array element too short");
+ NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
+ "Array element too short");
return -ERANGE;
}
- ret = __nla_validate(nla_data(entry), nla_len(entry),
- maxtype, policy, validate, extack);
+ ret = __nla_validate_parse(nla_data(entry), nla_len(entry),
+ maxtype, policy, validate, extack,
+ NULL, depth + 1);
if (ret < 0)
return ret;
}
@@ -96,17 +116,64 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
return 0;
}
-static int nla_validate_int_range(const struct nla_policy *pt,
- const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+void nla_get_range_unsigned(const struct nla_policy *pt,
+ struct netlink_range_validation *range)
{
- bool validate_min, validate_max;
- s64 value;
+ WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR &&
+ (pt->min < 0 || pt->max < 0));
+
+ range->min = 0;
+
+ switch (pt->type) {
+ case NLA_U8:
+ range->max = U8_MAX;
+ break;
+ case NLA_U16:
+ case NLA_BE16:
+ case NLA_BINARY:
+ range->max = U16_MAX;
+ break;
+ case NLA_U32:
+ case NLA_BE32:
+ range->max = U32_MAX;
+ break;
+ case NLA_U64:
+ case NLA_UINT:
+ case NLA_MSECS:
+ range->max = U64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
- validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
- pt->validation_type == NLA_VALIDATE_MIN;
- validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
- pt->validation_type == NLA_VALIDATE_MAX;
+static int nla_validate_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
+{
+ struct netlink_range_validation range;
+ u64 value;
switch (pt->type) {
case NLA_U8:
@@ -118,6 +185,113 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U32:
value = nla_get_u32(nla);
break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_UINT:
+ value = nla_get_uint(nla);
+ break;
+ case NLA_MSECS:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_BINARY:
+ value = nla_len(nla);
+ break;
+ case NLA_BE16:
+ value = ntohs(nla_get_be16(nla));
+ break;
+ case NLA_BE32:
+ value = ntohl(nla_get_be32(nla));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nla_get_range_unsigned(pt, &range);
+
+ if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
+ pt->type == NLA_BINARY && value > range.max) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, pt->type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+
+ /* this assumes min <= max (don't validate against min) */
+ return 0;
+ }
+
+ if (value < range.min || value > range.max) {
+ bool binary = pt->type == NLA_BINARY;
+
+ if (binary)
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "binary attribute size out of range");
+ else
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void nla_get_range_signed(const struct nla_policy *pt,
+ struct netlink_range_validation_signed *range)
+{
+ switch (pt->type) {
+ case NLA_S8:
+ range->min = S8_MIN;
+ range->max = S8_MAX;
+ break;
+ case NLA_S16:
+ range->min = S16_MIN;
+ range->max = S16_MAX;
+ break;
+ case NLA_S32:
+ range->min = S32_MIN;
+ range->max = S32_MAX;
+ break;
+ case NLA_S64:
+ case NLA_SINT:
+ range->min = S64_MIN;
+ range->max = S64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range_signed;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
+
+static int nla_validate_int_range_signed(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct netlink_range_validation_signed range;
+ s64 value;
+
+ switch (pt->type) {
case NLA_S8:
value = nla_get_s8(nla);
break;
@@ -130,25 +304,87 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_S64:
value = nla_get_s64(nla);
break;
+ case NLA_SINT:
+ value = nla_get_sint(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nla_get_range_signed(pt, &range);
+
+ if (value < range.min || value > range.max) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
+{
+ switch (pt->type) {
+ case NLA_U8:
+ case NLA_U16:
+ case NLA_U32:
case NLA_U64:
- /* treat this one specially, since it may not fit into s64 */
- if ((validate_min && nla_get_u64(nla) < pt->min) ||
- (validate_max && nla_get_u64(nla) > pt->max)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
- return -ERANGE;
- }
- return 0;
+ case NLA_UINT:
+ case NLA_MSECS:
+ case NLA_BINARY:
+ case NLA_BE16:
+ case NLA_BE32:
+ return nla_validate_range_unsigned(pt, nla, extack, validate);
+ case NLA_S8:
+ case NLA_S16:
+ case NLA_S32:
+ case NLA_S64:
+ case NLA_SINT:
+ return nla_validate_int_range_signed(pt, nla, extack);
default:
WARN_ON(1);
return -EINVAL;
}
+}
- if ((validate_min && value < pt->min) ||
- (validate_max && value > pt->max)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
- return -ERANGE;
+static int nla_validate_mask(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_UINT:
+ value = nla_get_uint(nla);
+ break;
+ case NLA_BE16:
+ value = ntohs(nla_get_be16(nla));
+ break;
+ case NLA_BE32:
+ value = ntohl(nla_get_be32(nla));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (value & ~(u64)pt->mask) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
+ return -EINVAL;
}
return 0;
@@ -156,7 +392,7 @@ static int nla_validate_int_range(const struct nla_policy *pt,
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack, unsigned int depth)
{
u16 strict_start_type = policy[0].strict_start_type;
const struct nla_policy *pt;
@@ -169,17 +405,17 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (type <= 0 || type > maxtype)
return 0;
+ type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
- if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
- (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
+ if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "invalid attribute length");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
return -EINVAL;
}
}
@@ -187,28 +423,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED is missing");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED not expected");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED not expected");
return -EINVAL;
}
}
switch (pt->type) {
- case NLA_EXACT_LEN:
- if (attrlen != pt->len)
- goto out_err;
- break;
-
case NLA_REJECT:
- if (extack && pt->validation_data) {
+ if (extack && pt->reject_message) {
NL_SET_BAD_ATTR(extack, nla);
- extack->_msg = pt->validation_data;
+ extack->_msg = pt->reject_message;
return -EINVAL;
}
err = -EINVAL;
@@ -219,11 +450,20 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
goto out_err;
break;
+ case NLA_SINT:
+ case NLA_UINT:
+ if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+ break;
+
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
goto out_err;
- err = validate_nla_bitfield32(nla, pt->validation_data);
+ err = validate_nla_bitfield32(nla, pt->bitfield32_valid);
if (err)
goto out_err;
break;
@@ -238,7 +478,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
err = -EINVAL;
goto out_err;
}
- /* fall through */
+ fallthrough;
case NLA_STRING:
if (attrlen < 1)
@@ -268,10 +508,11 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
- if (pt->validation_data) {
- err = __nla_validate(nla_data(nla), nla_len(nla), pt->len,
- pt->validation_data, validate,
- extack);
+ if (pt->nested_policy) {
+ err = __nla_validate_parse(nla_data(nla), nla_len(nla),
+ pt->len, pt->nested_policy,
+ validate, extack, NULL,
+ depth + 1);
if (err < 0) {
/*
* return directly to preserve the inner
@@ -289,12 +530,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
- if (pt->validation_data) {
+ if (pt->nested_policy) {
int err;
err = nla_validate_array(nla_data(nla), nla_len(nla),
- pt->len, pt->validation_data,
- extack, validate);
+ pt->len, pt->nested_policy,
+ extack, validate, depth);
if (err < 0) {
/*
* return directly to preserve the inner
@@ -311,8 +552,6 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
"Unsupported attribute");
return -EINVAL;
}
- /* fall through */
- case NLA_MIN_LEN:
if (attrlen < pt->len)
goto out_err;
break;
@@ -332,10 +571,17 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
case NLA_VALIDATE_NONE:
/* nothing to do */
break;
+ case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
- err = nla_validate_int_range(pt, nla, extack);
+ err = nla_validate_int_range(pt, nla, extack, validate);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_MASK:
+ err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
@@ -350,7 +596,8 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
return 0;
out_err:
- NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "Attribute failed policy validation");
return err;
}
@@ -358,11 +605,17 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
- struct nlattr **tb)
+ struct nlattr **tb, unsigned int depth)
{
const struct nlattr *nla;
int rem;
+ if (depth >= MAX_POLICY_RECURSION_DEPTH) {
+ NL_SET_ERR_MSG(extack,
+ "allowed policy recursion depth exceeded");
+ return -EINVAL;
+ }
+
if (tb)
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -377,9 +630,10 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
}
continue;
}
+ type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
- validate, extack);
+ validate, extack, depth);
if (err < 0)
return err;
@@ -412,7 +666,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -421,13 +675,13 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
- extack, NULL);
+ extack, NULL, 0);
}
EXPORT_SYMBOL(__nla_validate);
/**
- * nla_policy_len - Determin the max. length of a policy
- * @policy: policy to use
+ * nla_policy_len - Determine the max. length of a policy
+ * @p: policy to use
* @n: number of policies
*
* Determines the max. length of the policy. It is currently used
@@ -476,7 +730,7 @@ int __nla_parse(struct nlattr **tb, int maxtype,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
- extack, tb);
+ extack, tb, 0);
}
EXPORT_SYMBOL(__nla_parse);
@@ -502,35 +756,47 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
EXPORT_SYMBOL(nla_find);
/**
- * nla_strlcpy - Copy string attribute payload into a sized buffer
- * @dst: where to copy the string to
- * @nla: attribute to copy the string from
- * @dstsize: size of destination buffer
+ * nla_strscpy - Copy string attribute payload into a sized buffer
+ * @dst: Where to copy the string to.
+ * @nla: Attribute to copy the string from.
+ * @dstsize: Size of destination buffer.
*
* Copies at most dstsize - 1 bytes into the destination buffer.
- * The result is always a valid NUL-terminated string. Unlike
- * strlcpy the destination buffer is always padded out.
+ * Unlike strscpy() the destination buffer is always padded out.
*
- * Returns the length of the source buffer.
+ * Return:
+ * * srclen - Returns @nla length (not including the trailing %NUL).
+ * * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater
+ * than @dstsize.
*/
-size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
+ ssize_t ret;
+ size_t len;
+
+ if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX))
+ return -E2BIG;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
- if (dstsize > 0) {
- size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
-
- memset(dst, 0, dstsize);
- memcpy(dst, src, len);
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
}
- return srclen;
+ memcpy(dst, src, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
+
+ return ret;
}
-EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(nla_strscpy);
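A hypothetical caller sketch (function name, attribute index and buffer size are invented) showing the new return convention from inside a netlink parse path:

#include <net/netlink.h>

static int example_set_name(struct nlattr **tb)
{
	char name[16];
	ssize_t len;

	if (!tb[EXAMPLE_ATTR_NAME])
		return -EINVAL;

	/* Copies at most 15 bytes, NUL-terminates and zero-pads name[]. */
	len = nla_strscpy(name, tb[EXAMPLE_ATTR_NAME], sizeof(name));
	if (len == -E2BIG)
		return -EINVAL;	/* attribute longer than the buffer allows */

	/* On success, len is the source string length. */
	return 0;
}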
/**
* nla_strdup - Copy string attribute payload into a newly allocated buffer
@@ -609,7 +875,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
int attrlen = nla_len(nla);
int d;
- if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ while (attrlen > 0 && buf[attrlen - 1] == '\0')
attrlen--;
d = attrlen - len;
@@ -664,8 +930,7 @@ EXPORT_SYMBOL(__nla_reserve);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
- if (nla_need_padding_for_64bit(skb))
- nla_align_64bit(skb, padattr);
+ nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 15ca78e1c7d4..33c154264bfe 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -34,7 +34,7 @@ static unsigned long backtrace_flag;
* they are passed being updated as a side effect of this call.
*/
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask))
{
int i, this_cpu = get_cpu();
@@ -49,8 +49,8 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
}
cpumask_copy(to_cpumask(backtrace_mask), mask);
- if (exclude_self)
- cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+ if (exclude_cpu != -1)
+ cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
/*
* Don't try to send an NMI to this cpu; it may work on some
@@ -64,6 +64,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
if (!cpumask_empty(to_cpumask(backtrace_mask))) {
pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
+ nmi_backtrace_stall_snap(to_cpumask(backtrace_mask));
raise(to_cpumask(backtrace_mask));
}
@@ -74,23 +75,34 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
mdelay(1);
touch_softlockup_watchdog();
}
+ nmi_backtrace_stall_check(to_cpumask(backtrace_mask));
/*
* Force flush any remote buffers that might be stuck in IRQ context
* and therefore could not run their irq_work.
*/
- printk_safe_flush();
+ printk_trigger_flush();
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
}
+// Dump stacks even for idle CPUs.
+static bool backtrace_idle;
+module_param(backtrace_idle, bool, 0644);
+
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
int cpu = smp_processor_id();
+ unsigned long flags;
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- if (regs && cpu_in_idle(instruction_pointer(regs))) {
+ /*
+ * Allow nested NMI backtraces while serializing
+ * against other CPUs.
+ */
+ printk_cpu_sync_get_irqsave(flags);
+ if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
} else {
@@ -100,6 +112,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
+ printk_cpu_sync_put_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
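
For illustration only, a hedged sketch of a call site using the new parameter: the bool exclude_self becomes an explicit CPU number, and -1 means no CPU is excluded. my_raise_nmi() is a hypothetical stand-in for an architecture's NMI-raising callback.

#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <linux/smp.h>

static void my_raise_nmi(cpumask_t *mask)
{
	/* architecture-specific IPI/NMI delivery would go here */
}

static void example_dump_other_cpus(void)
{
	/* passing the current CPU reproduces the old exclude_self == true;
	 * passing -1 would include every CPU in the mask
	 */
	nmi_trigger_cpumask_backtrace(cpu_online_mask, raw_smp_processor_id(),
				      my_raise_nmi);
}
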
diff --git a/lib/nodemask.c b/lib/nodemask.c
deleted file mode 100644
index 3aa454c54c0d..000000000000
--- a/lib/nodemask.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/nodemask.h>
-#include <linux/module.h>
-#include <linux/random.h>
-
-int __next_node_in(int node, const nodemask_t *srcp)
-{
- int ret = __next_node(node, srcp);
-
- if (ret == MAX_NUMNODES)
- ret = __first_node(srcp);
- return ret;
-}
-EXPORT_SYMBOL(__next_node_in);
-
-#ifdef CONFIG_NUMA
-/*
- * Return the bit number of a random bit set in the nodemask.
- * (returns NUMA_NO_NODE if nodemask is empty)
- */
-int node_random(const nodemask_t *maskp)
-{
- int w, bit = NUMA_NO_NODE;
-
- w = nodes_weight(*maskp);
- if (w)
- bit = bitmap_ord_to_pos(maskp->bits,
- get_random_int() % w, MAX_NUMNODES);
- return bit;
-}
-#endif
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..954c3412d22d 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
err_inject->nb.priority = priority;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
actions_dir = debugfs_create_dir("actions", dir);
- if (!actions_dir)
- goto fail;
for (action = err_inject->actions; action->name; action++) {
struct dentry *action_dir;
action_dir = debugfs_create_dir(action->name, actions_dir);
- if (!action_dir)
- goto fail;
/*
* Create debugfs r/w file containing action->error. If
* notifier call chain is called with action->val, it will
* fail with the error code
*/
- if (!debugfs_create_errno("error", mode, action_dir,
- &action->error))
- goto fail;
+ debugfs_create_errno("error", mode, action_dir, &action->error);
}
return dir;
-fail:
- debugfs_remove_recursive(dir);
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(notifier_err_inject_init);
@@ -94,9 +83,6 @@ static int __init err_inject_init(void)
notifier_err_inject_dir =
debugfs_create_dir("notifier-error-inject", NULL);
- if (!notifier_err_inject_dir)
- return -ENOMEM;
-
return 0;
}
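
A hedged sketch (names hypothetical) of the debugfs convention the hunks above adopt: creation helpers are called without checking their return values, since debugfs failures are meant to be ignored and the helpers tolerate an error-pointer parent.

#include <linux/debugfs.h>

static struct dentry *example_dir;

static void example_debugfs_setup(u32 *value)
{
	example_dir = debugfs_create_dir("example", NULL);
	/* no NULL/IS_ERR check: a failed parent simply makes this a no-op */
	debugfs_create_u32("value", 0644, example_dir, value);
}
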
diff --git a/lib/objagg.c b/lib/objagg.c
index 576be22e86de..1e248629ed64 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -28,7 +28,7 @@ struct objagg_hints_node {
struct objagg_hints_node *parent;
unsigned int root_id;
struct objagg_obj_stats_info stats_info;
- unsigned long obj[0];
+ unsigned long obj[];
};
static struct objagg_hints_node *
@@ -66,7 +66,7 @@ struct objagg_obj {
* including nested objects
*/
struct objagg_obj_stats stats;
- unsigned long obj[0];
+ unsigned long obj[];
};
static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
@@ -605,12 +605,10 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
{
struct objagg_stats *objagg_stats;
struct objagg_obj *objagg_obj;
- size_t alloc_size;
int i;
- alloc_size = sizeof(*objagg_stats) +
- sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
- objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg->obj_count), GFP_KERNEL);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
@@ -783,7 +781,6 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
struct objagg_tmp_node *node;
struct objagg_tmp_node *pnode;
struct objagg_obj *objagg_obj;
- size_t alloc_size;
int i, j;
graph = kzalloc(sizeof(*graph), GFP_KERNEL);
@@ -795,9 +792,7 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
goto err_nodes_alloc;
graph->nodes_count = nodes_count;
- alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) *
- sizeof(unsigned long);
- graph->edges = kzalloc(alloc_size, GFP_KERNEL);
+ graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL);
if (!graph->edges)
goto err_edges_alloc;
@@ -835,7 +830,7 @@ err_nodes_alloc:
static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
{
- kfree(graph->edges);
+ bitmap_free(graph->edges);
kfree(graph->nodes);
kfree(graph);
}
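
A minimal sketch of the bitmap helper pattern the objagg hunks switch to: bitmap_zalloc()/bitmap_free() take a bit count and hide the BITS_TO_LONGS() size arithmetic. The function names here are illustrative.

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *example_alloc_edges(unsigned int nbits)
{
	/* equivalent to kzalloc(BITS_TO_LONGS(nbits) * sizeof(long), GFP_KERNEL) */
	unsigned long *edges = bitmap_zalloc(nbits, GFP_KERNEL);

	if (edges)
		bitmap_set(edges, 0, 1);	/* mark bit 0 as an example */
	return edges;
}

static void example_free_edges(unsigned long *edges)
{
	bitmap_free(edges);
}
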
diff --git a/lib/objpool.c b/lib/objpool.c
new file mode 100644
index 000000000000..cfdc02420884
--- /dev/null
+++ b/lib/objpool.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/objpool.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/atomic.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+#include <linux/log2.h>
+
+/*
+ * objpool: ring-array based lockless MPMC/FIFO queues
+ *
+ * Copyright: wuqiang.matt@bytedance.com, mhiramat@kernel.org
+ */
+
+/* initialize percpu objpool_slot */
+static int
+objpool_init_percpu_slot(struct objpool_head *pool,
+ struct objpool_slot *slot,
+ int nodes, void *context,
+ objpool_init_obj_cb objinit)
+{
+ void *obj = (void *)&slot->entries[pool->capacity];
+ int i;
+
+ /* initialize elements of percpu objpool_slot */
+ slot->mask = pool->capacity - 1;
+
+ for (i = 0; i < nodes; i++) {
+ if (objinit) {
+ int rc = objinit(obj, context);
+ if (rc)
+ return rc;
+ }
+ slot->entries[slot->tail & slot->mask] = obj;
+ obj = obj + pool->obj_size;
+ slot->tail++;
+ slot->last = slot->tail;
+ pool->nr_objs++;
+ }
+
+ return 0;
+}
+
+/* allocate and initialize percpu slots */
+static int
+objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
+ void *context, objpool_init_obj_cb objinit)
+{
+ int i, cpu_count = 0;
+
+ for (i = 0; i < pool->nr_cpus; i++) {
+
+ struct objpool_slot *slot;
+ int nodes, size, rc;
+
+ /* skip CPUs that can never be present */
+ if (!cpu_possible(i))
+ continue;
+
+ /* compute how many objects are to be allocated for this slot */
+ nodes = nr_objs / num_possible_cpus();
+ if (cpu_count < (nr_objs % num_possible_cpus()))
+ nodes++;
+ cpu_count++;
+
+ size = struct_size(slot, entries, pool->capacity) +
+ pool->obj_size * nodes;
+
+ /*
+ * Here we allocate the percpu slot and its objects together in a
+ * single allocation to make it more compact, taking advantage of
+ * warm caches and TLB hits. By default vmalloc is used to reduce
+ * the pressure on the kernel slab allocator. Note that the minimal
+ * vmalloc size is one page, since vmalloc always rounds the
+ * requested size up to page size.
+ */
+ if (pool->gfp & GFP_ATOMIC)
+ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+ else
+ slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
+ cpu_to_node(i), __builtin_return_address(0));
+ if (!slot)
+ return -ENOMEM;
+ memset(slot, 0, size);
+ pool->cpu_slots[i] = slot;
+
+ /* initialize the objpool_slot of cpu node i */
+ rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* cleanup all percpu slots of the object pool */
+static void objpool_fini_percpu_slots(struct objpool_head *pool)
+{
+ int i;
+
+ if (!pool->cpu_slots)
+ return;
+
+ for (i = 0; i < pool->nr_cpus; i++)
+ kvfree(pool->cpu_slots[i]);
+ kfree(pool->cpu_slots);
+}
+
+/* initialize object pool and pre-allocate objects */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release)
+{
+ int rc, capacity, slot_size;
+
+ /* check input parameters */
+ if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
+ object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
+ return -EINVAL;
+
+ /* align up to unsigned long size */
+ object_size = ALIGN(object_size, sizeof(long));
+
+ /* calculate capacity of percpu objpool_slot */
+ capacity = roundup_pow_of_two(nr_objs);
+ if (!capacity)
+ return -EINVAL;
+
+ /* initialize objpool pool */
+ memset(pool, 0, sizeof(struct objpool_head));
+ pool->nr_cpus = nr_cpu_ids;
+ pool->obj_size = object_size;
+ pool->capacity = capacity;
+ pool->gfp = gfp & ~__GFP_ZERO;
+ pool->context = context;
+ pool->release = release;
+ slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
+ pool->cpu_slots = kzalloc(slot_size, pool->gfp);
+ if (!pool->cpu_slots)
+ return -ENOMEM;
+
+ /* initialize per-cpu slots */
+ rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
+ if (rc)
+ objpool_fini_percpu_slots(pool);
+ else
+ refcount_set(&pool->ref, pool->nr_objs + 1);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(objpool_init);
+
+/* adding object to slot, abort if the slot was already full */
+static inline int
+objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ uint32_t head, tail;
+
+ /* loading tail and head as a local snapshot, tail first */
+ tail = READ_ONCE(slot->tail);
+
+ do {
+ head = READ_ONCE(slot->head);
+ /* fault caught: something must be wrong */
+ WARN_ON_ONCE(tail - head > pool->nr_objs);
+ } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
+
+ /* now the tail position is reserved for the given obj */
+ WRITE_ONCE(slot->entries[tail & slot->mask], obj);
+ /* update sequence to make this obj available for pop() */
+ smp_store_release(&slot->last, tail + 1);
+
+ return 0;
+}
+
+/* reclaim an object to object pool */
+int objpool_push(void *obj, struct objpool_head *pool)
+{
+ unsigned long flags;
+ int rc;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+ rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
+ raw_local_irq_restore(flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(objpool_push);
+
+/* try to retrieve object from slot */
+static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ /* load head snapshot, other cpus may change it */
+ uint32_t head = smp_load_acquire(&slot->head);
+
+ while (head != READ_ONCE(slot->last)) {
+ void *obj;
+
+ /*
+ * The visibility of 'last' and 'head' can be out of order,
+ * since the memory updates of 'last' and 'head' are performed
+ * independently in push() and pop().
+ *
+ * Before any retrieval attempt, pop() must guarantee that
+ * 'head' has not caught up with 'last', that is, there must
+ * be available objects in the slot. This is ensured by the
+ * condition 'last != head && last - head <= nr_objs', which
+ * is equivalent to 'last - head - 1 < nr_objs' as 'last' and
+ * 'head' are both unsigned 32-bit integers.
+ */
+ if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+ head = READ_ONCE(slot->head);
+ continue;
+ }
+
+ /* obj must be retrieved before moving head forward */
+ obj = READ_ONCE(slot->entries[head & slot->mask]);
+
+ /* move head forward to mark its consumption */
+ if (try_cmpxchg_release(&slot->head, &head, head + 1))
+ return obj;
+ }
+
+ return NULL;
+}
+
+/* allocate an object from object pool */
+void *objpool_pop(struct objpool_head *pool)
+{
+ void *obj = NULL;
+ unsigned long flags;
+ int i, cpu;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+
+ cpu = raw_smp_processor_id();
+ for (i = 0; i < num_possible_cpus(); i++) {
+ obj = objpool_try_get_slot(pool, cpu);
+ if (obj)
+ break;
+ cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
+ }
+ raw_local_irq_restore(flags);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(objpool_pop);
+
+/* forcibly release the whole objpool */
+void objpool_free(struct objpool_head *pool)
+{
+ if (!pool->cpu_slots)
+ return;
+
+ /* release percpu slots */
+ objpool_fini_percpu_slots(pool);
+
+ /* call user's cleanup callback if provided */
+ if (pool->release)
+ pool->release(pool, pool->context);
+}
+EXPORT_SYMBOL_GPL(objpool_free);
+
+/* drop the allocated object rather than reclaiming it to the objpool */
+int objpool_drop(void *obj, struct objpool_head *pool)
+{
+ if (!obj || !pool)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&pool->ref)) {
+ objpool_free(pool);
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(objpool_drop);
+
+/* drop unused objects and deref the objpool for releasing */
+void objpool_fini(struct objpool_head *pool)
+{
+ int count = 1; /* extra ref for objpool itself */
+
+ /* drop all remaining objects from the objpool */
+ while (objpool_pop(pool))
+ count++;
+
+ if (refcount_sub_and_test(count, &pool->ref))
+ objpool_free(pool);
+}
+EXPORT_SYMBOL_GPL(objpool_fini);
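
Assuming only the API added above, a minimal usage sketch of the objpool life cycle; struct example_obj and example_objinit() are hypothetical.

#include <linux/objpool.h>
#include <linux/gfp.h>

struct example_obj {
	int id;
};

static int example_objinit(void *obj, void *context)
{
	((struct example_obj *)obj)->id = 0;
	return 0;
}

static void example_objpool_demo(void)
{
	struct objpool_head pool;
	struct example_obj *obj;

	if (objpool_init(&pool, 16, sizeof(struct example_obj), GFP_KERNEL,
			 NULL, example_objinit, NULL))
		return;

	obj = objpool_pop(&pool);		/* lockless per-cpu pop */
	if (obj)
		objpool_push(obj, &pool);	/* return it to the ring */

	objpool_fini(&pool);			/* drain remaining objects and drop the pool */
}
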
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index f7ad43f28579..fe6705cfd780 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
+#include <linux/asn1.h>
#include "oid_registry_data.c"
MODULE_DESCRIPTION("OID Registry");
@@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize)
}
EXPORT_SYMBOL_GPL(look_up_OID);
+/**
+ * parse_OID - Parse an OID from a bytestream
+ * @data: Binary representation of the header + OID
+ * @datasize: Size of the binary representation
+ * @oid: Pointer to oid to return result
+ *
+ * Parse an OID from a bytestream that holds the OID in the format
+ * ASN1_OID | length | oid. The length indicator must equal datasize - 2.
+ * -EBADMSG is returned if the bytestream is too short.
+ */
+int parse_OID(const void *data, size_t datasize, enum OID *oid)
+{
+ const unsigned char *v = data;
+
+ /* we need 2 bytes of header and at least 1 byte for oid */
+ if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2)
+ return -EBADMSG;
+
+ *oid = look_up_OID(data + 2, datasize - 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(parse_OID);
+
/*
* sprint_OID - Print an Object Identifier into a buffer
* @data: The encoded OID to print
@@ -100,7 +124,7 @@ EXPORT_SYMBOL_GPL(look_up_OID);
* @bufsize: The size of the buffer
*
* The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned. -EBADMSG is returned if the data could not be intepreted
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
* and -ENOBUFS if the buffer was too small.
*/
int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
@@ -122,7 +146,6 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
bufsize -= count;
while (v < end) {
- num = 0;
n = *v++;
if (!(n & 0x80)) {
num = n;
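
A hypothetical caller sketch for the new parse_OID() helper: the buffer must be exactly ASN1_OID | length | payload, and OID__NR (from oid_registry.h) is the look-up result for an unrecognised OID.

#include <linux/oid_registry.h>
#include <linux/types.h>
#include <linux/errno.h>

static int example_check_oid(const u8 *der, size_t len)
{
	enum OID oid;
	int err;

	err = parse_OID(der, len, &oid);	/* -EBADMSG on a malformed header */
	if (err)
		return err;
	return oid == OID__NR ? -ENOPKG : 0;	/* OID__NR: valid encoding, unknown OID */
}
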
diff --git a/lib/once.c b/lib/once.c
index 8b7d6235217e..2c306f0e891e 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -3,10 +3,12 @@
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
+#include <linux/module.h>
struct once_work {
struct work_struct work;
struct static_key_true *key;
+ struct module *module;
};
static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key);
+ module_put(work->module);
kfree(work);
}
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
INIT_WORK(&w->work, once_deferred);
w->key = key;
+ w->module = mod;
+ __module_get(mod);
schedule_work(&w->work);
}
@@ -53,11 +58,41 @@ bool __do_once_start(bool *done, unsigned long *flags)
EXPORT_SYMBOL(__do_once_start);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags)
+ unsigned long *flags, struct module *mod)
__releases(once_lock)
{
*done = true;
spin_unlock_irqrestore(&once_lock, *flags);
- once_disable_jump(once_key);
+ once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
+
+static DEFINE_MUTEX(once_mutex);
+
+bool __do_once_sleepable_start(bool *done)
+ __acquires(once_mutex)
+{
+ mutex_lock(&once_mutex);
+ if (*done) {
+ mutex_unlock(&once_mutex);
+ /* Keep sparse happy by restoring an even lock count on
+ * this mutex. If we return here, we don't call into
+ * __do_once_done but return early from the DO_ONCE_SLEEPABLE() macro.
+ */
+ __acquire(once_mutex);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__do_once_sleepable_start);
+
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod)
+ __releases(once_mutex)
+{
+ *done = true;
+ mutex_unlock(&once_mutex);
+ once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_sleepable_done);
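
The comment above refers to a DO_ONCE_SLEEPABLE() macro; assuming it mirrors DO_ONCE() but serializes on once_mutex, a hedged usage sketch might look like this (example_slow_init() is hypothetical):

#include <linux/once.h>
#include <linux/kernel.h>

static void example_slow_init(void)
{
	might_sleep();	/* GFP_KERNEL allocations and the like are allowed here */
}

static void example_caller(void)
{
	/* runs example_slow_init() exactly once across all callers */
	DO_ONCE_SLEEPABLE(example_slow_init);
}
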
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
new file mode 100644
index 000000000000..c527f6b75789
--- /dev/null
+++ b/lib/overflow_kunit.c
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Test cases for arithmetic overflow checks. See:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run overflow [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define SKIP(cond, reason) do { \
+ if (cond) { \
+ kunit_skip(test, reason); \
+ return; \
+ } \
+} while (0)
+
+/*
+ * Clang 11 and earlier generate unwanted libcalls for signed output
+ * on unsigned input.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 11
+# define SKIP_SIGN_MISMATCH(t) SKIP(t, "Clang 11 unwanted libcalls")
+#else
+# define SKIP_SIGN_MISMATCH(t) do { } while (0)
+#endif
+
+/*
+ * Clang 13 and earlier generate unwanted libcalls for 64-bit tests on
+ * 32-bit hosts.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 13 && \
+ BITS_PER_LONG != 64
+# define SKIP_64_ON_32(t) SKIP(t, "Clang 13 unwanted libcalls")
+#else
+# define SKIP_64_ON_32(t) do { } while (0)
+#endif
+
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+ t1 a; \
+ t2 b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
+
+DEFINE_TEST_ARRAY(u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U8_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
+ {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
+ {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
+ {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
+ {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
+
+ {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
+ {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
+
+ {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
+ {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
+ {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
+ {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
+
+ {48, 32, 80, 16, 0, false, false, true},
+ {128, 128, 0, 0, 0, true, false, true},
+ {123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U16_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
+ {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
+ {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
+ {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
+ {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
+
+ {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
+ {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
+
+ {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
+ {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
+ {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
+ {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
+
+ {123, 234, 357, 65425, 28782, false, true, false},
+ {1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U32_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
+ {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
+ {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
+ {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
+ {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
+
+ {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
+ {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
+
+ {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
+ {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
+ {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
+ {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
+
+ {-2U, 1U, -1U, -3U, -2U, false, false, false},
+ {-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(u64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U64_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
+ {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
+ {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
+ {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
+ {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
+
+ {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
+ {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
+
+ {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
+ {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
+ {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
+ {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
+ {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
+ 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
+ false, true, false},
+ {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+
+DEFINE_TEST_ARRAY(s8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
+ {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
+ {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
+ {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
+
+ {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
+ {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
+ {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
+ {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
+ {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
+ {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
+
+ {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
+ {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
+ {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
+ {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
+
+ {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
+ {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
+
+ {-4, -32, -36, 28, -128, false, false, true},
+ {-4, 32, 28, -36, -128, false, false, false},
+};
+
+DEFINE_TEST_ARRAY(s16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
+ {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
+ {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
+ {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
+
+ {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
+ {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
+ {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
+ {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
+ {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
+ {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
+
+ {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
+ {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
+ {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
+ {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
+
+ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
+ {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
+ {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
+ {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
+ {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
+
+ {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
+ {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
+ {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
+ {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
+ {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
+ {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
+
+ {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
+ {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
+ {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
+ {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
+
+ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
+ {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(s64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
+ {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
+ {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
+ {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
+
+ {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
+ {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
+ {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
+ {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
+ {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
+ {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
+
+ {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
+ {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
+ {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
+ {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
+
+ {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
+ {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
+
+ {-1, -1, -2, 0, 1, false, false, false},
+ {-1, -128, -129, 127, 128, false, false, false},
+ {-128, -1, -129, -127, 128, false, false, false},
+ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
+
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ int _a_orig = a, _a_bump = a + 1; \
+ int _b_orig = b, _b_bump = b + 1; \
+ bool _of; \
+ t _r; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+ "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
+} while (0)
+
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
+{ \
+ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
+ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
+ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
+ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
+ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+} \
+ \
+static void n ## _overflow_test(struct kunit *test) { \
+ unsigned i; \
+ \
+ SKIP_64_ON_32(__same_type(t, u64)); \
+ SKIP_64_ON_32(__same_type(t, s64)); \
+ SKIP_SIGN_MISMATCH(__same_type(n ## _tests[0].a, u32) && \
+ __same_type(n ## _tests[0].b, u32) && \
+ __same_type(n ## _tests[0].sum, int)); \
+ \
+ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+ do_test_ ## n(test, &n ## _tests[i]); \
+ kunit_info(test, "%zu %s arithmetic tests finished\n", \
+ ARRAY_SIZE(n ## _tests), #n); \
+}
+
+#define DEFINE_TEST_FUNC(t, fmt) \
+ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
+DEFINE_TEST_FUNC(u8, "%d");
+DEFINE_TEST_FUNC(s8, "%d");
+DEFINE_TEST_FUNC(u16, "%d");
+DEFINE_TEST_FUNC(s16, "%d");
+DEFINE_TEST_FUNC(u32, "%u");
+DEFINE_TEST_FUNC(s32, "%d");
+DEFINE_TEST_FUNC(u64, "%llu");
+DEFINE_TEST_FUNC(s64, "%lld");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+ {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+ {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 2, 3, U8_MAX, 2, false, true, false},
+ {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ } else if (!__of && __d != __e) { \
+ KUNIT_EXPECT_EQ_MSG(test, __d, __e, \
+ "expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ kunit_info(test, "got %lld\n", (s64)__d); \
+ else \
+ kunit_info(test, "got %llu\n", (u64)__d); \
+ } \
+ count++; \
+} while (0)
+
+static void shift_sane_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Sane shifts. */
+ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ TEST_ONE_SHIFT(0, 30, int, 0, false);
+ TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: shifted the bit off the end. */
+ TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ TEST_ONE_SHIFT(1, 31, int, 0, true);
+ TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: values larger than destination type. */
+ TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Nonsense: negative initial value. */
+ TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ TEST_ONE_SHIFT(0, -15, int, 0, true);
+ TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ TEST_ONE_SHIFT(0, 31, int, 0, false);
+ TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ kunit_info(test, "%d nonsense shift tests finished\n", count);
+}
+#undef TEST_ONE_SHIFT
+
+/*
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
+#define alloc000(alloc, arg, sz) alloc(sz)
+#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
+#define free0(free, arg, ptr) free(ptr)
+#define free1(free, arg, ptr) free(arg, ptr)
+
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
+
+#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+static void test_ ## func (struct kunit *test, void *arg) \
+{ \
+ volatile size_t a = TEST_SIZE; \
+ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
+ void *ptr; \
+ \
+ /* Tiny allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " failed regular allocation?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Wrapped allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ a * b); \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " unexpectedly failed bad wrapping?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Saturated allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ array_size(a, b)); \
+ if (ptr) { \
+ KUNIT_FAIL(test, #func " missed saturation!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ } \
+}
+
+/*
+ * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
+ * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
+ * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
+ * | | |
+ */
+DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
+DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
+
+static void overflow_allocation_test(struct kunit *test)
+{
+ const char device_name[] = "overflow-test";
+ struct device *dev;
+ int count = 0;
+
+#define check_allocation_overflow(alloc) do { \
+ count++; \
+ test_ ## alloc(test, dev); \
+} while (0)
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+ dev = kunit_device_register(test, device_name);
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
+ "Cannot register test device\n");
+
+ check_allocation_overflow(kmalloc);
+ check_allocation_overflow(kmalloc_node);
+ check_allocation_overflow(kzalloc);
+ check_allocation_overflow(kzalloc_node);
+ check_allocation_overflow(__vmalloc);
+ check_allocation_overflow(kvmalloc);
+ check_allocation_overflow(kvmalloc_node);
+ check_allocation_overflow(kvzalloc);
+ check_allocation_overflow(kvzalloc_node);
+ check_allocation_overflow(devm_kmalloc);
+ check_allocation_overflow(devm_kzalloc);
+
+ kunit_info(test, "%d allocation overflow tests finished\n", count);
+#undef check_allocation_overflow
+}
+
+struct __test_flex_array {
+ unsigned long flags;
+ size_t count;
+ unsigned long data[];
+};
+
+static void overflow_size_helpers_test(struct kunit *test)
+{
+ /* Make sure struct_size() can be used in a constant expression. */
+ u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)];
+ struct __test_flex_array *obj;
+ int count = 0;
+ int var;
+ volatile int unconst = 0;
+
+ /* Verify constant expression against runtime version. */
+ var = 55;
+ OPTIMIZER_HIDE_VAR(var);
+ KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var));
+
+#define check_one_size_helper(expected, func, args...) do { \
+ size_t _r = func(args); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, expected, \
+ "expected " #func "(" #args ") to return %zu but got %zu instead\n", \
+ (size_t)(expected), _r); \
+ count++; \
+} while (0)
+
+ var = 4;
+ check_one_size_helper(20, size_mul, var++, 5);
+ check_one_size_helper(20, size_mul, 4, var++);
+ check_one_size_helper(0, size_mul, 0, 3);
+ check_one_size_helper(0, size_mul, 3, 0);
+ check_one_size_helper(6, size_mul, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(9, size_add, var++, 5);
+ check_one_size_helper(9, size_add, 4, var++);
+ check_one_size_helper(9, size_add, 9, 0);
+ check_one_size_helper(9, size_add, 0, 9);
+ check_one_size_helper(5, size_add, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(1, size_sub, var--, 3);
+ check_one_size_helper(1, size_sub, 4, var--);
+ check_one_size_helper(1, size_sub, 3, 2);
+ check_one_size_helper(9, size_sub, 9, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
+ check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1);
+ check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3);
+ check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3);
+
+ var = 4;
+ check_one_size_helper(4 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(5 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj->data),
+ flex_array_size, obj, data, 1 + unconst);
+ check_one_size_helper(7 * sizeof(*obj->data),
+ flex_array_size, obj, data, 7 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, -1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, SIZE_MAX - 4 + unconst);
+
+ var = 4;
+ check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
+ struct_size, obj, data, 1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, -3 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, SIZE_MAX - 3 + unconst);
+
+ kunit_info(test, "%d overflow size helper tests finished\n", count);
+#undef check_one_size_helper
+}
+
+static void overflows_type_test(struct kunit *test)
+{
+ int count = 0;
+ unsigned int var;
+
+#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
+ bool __of = func(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
+ of ? "" : " not"); \
+ count++; \
+} while (0)
+
+/* Args are: first type, second type, value, overflow expected */
+#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
+ __t1 t1 = (v); \
+ __t2 t2; \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
+} while (0)
+
+ TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
+ TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
+#endif
+
+ /* Check for macro side-effects. */
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);
+
+ kunit_info(test, "%d overflows_type() tests finished\n", count);
+#undef TEST_OVERFLOWS_TYPE
+#undef __TEST_OVERFLOWS_TYPE
+}
+
+static void same_type_test(struct kunit *test)
+{
+ int count = 0;
+ int var;
+
+#define TEST_SAME_TYPE(t1, t2, same) do { \
+ typeof(t1) __t1h = type_max(t1); \
+ typeof(t1) __t1l = type_min(t1); \
+ typeof(t2) __t2h = type_max(t2); \
+ typeof(t2) __t2l = type_min(t2); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
+} while (0)
+
+#if BITS_PER_LONG == 64
+# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
+#else
+# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
+#endif
+
+#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
+do { \
+ TEST_SAME_TYPE(base, u8, mu8); \
+ TEST_SAME_TYPE(base, u16, mu16); \
+ TEST_SAME_TYPE(base, u32, mu32); \
+ TEST_SAME_TYPE(base, s8, ms8); \
+ TEST_SAME_TYPE(base, s16, ms16); \
+ TEST_SAME_TYPE(base, s32, ms32); \
+ TEST_SAME_TYPE64(base, u64, mu64); \
+ TEST_SAME_TYPE64(base, s64, ms64); \
+} while (0)
+
+ TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false);
+ TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false);
+ TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false);
+ TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false);
+#if BITS_PER_LONG == 64
+ TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false);
+ TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true);
+#endif
+
+ /* Check for macro side-effects. */
+ var = 4;
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, int));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(int, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+
+ kunit_info(test, "%d __same_type() tests finished\n", count);
+
+#undef TEST_TYPE_SETS
+#undef TEST_SAME_TYPE64
+#undef TEST_SAME_TYPE
+}
+
+static void castable_to_type_test(struct kunit *test)
+{
+ int count = 0;
+
+#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \
+ bool __pass = castable_to_type(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \
+ "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\
+ pass ? "" : " not"); \
+ count++; \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE(16, u8, true);
+ TEST_CASTABLE_TO_TYPE(16, u16, true);
+ TEST_CASTABLE_TO_TYPE(16, u32, true);
+ TEST_CASTABLE_TO_TYPE(16, s8, true);
+ TEST_CASTABLE_TO_TYPE(16, s16, true);
+ TEST_CASTABLE_TO_TYPE(16, s32, true);
+ TEST_CASTABLE_TO_TYPE(-16, s8, true);
+ TEST_CASTABLE_TO_TYPE(-16, s16, true);
+ TEST_CASTABLE_TO_TYPE(-16, s32, true);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE(16, u64, true);
+ TEST_CASTABLE_TO_TYPE(-16, s64, true);
+#endif
+
+#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expressions that fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \
+ /* Constant expressions that do not fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \
+} while (0)
+
+#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \
+ unsigned long big = U ## width ## _MAX; \
+ signed long small = S ## width ## _MIN; \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expression in range. */ \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \
+ /* Constant expression out of range. */ \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(big, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE_VAR(8);
+ TEST_CASTABLE_TO_TYPE_VAR(16);
+ TEST_CASTABLE_TO_TYPE_VAR(32);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_VAR(64);
+#endif
+
+ TEST_CASTABLE_TO_TYPE_RANGE(8);
+ TEST_CASTABLE_TO_TYPE_RANGE(16);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_RANGE(32);
+#endif
+ kunit_info(test, "%d castable_to_type() tests finished\n", count);
+
+#undef TEST_CASTABLE_TO_TYPE_RANGE
+#undef TEST_CASTABLE_TO_TYPE_VAR
+#undef TEST_CASTABLE_TO_TYPE
+}
+
+static struct kunit_case overflow_test_cases[] = {
+ KUNIT_CASE(u8_u8__u8_overflow_test),
+ KUNIT_CASE(s8_s8__s8_overflow_test),
+ KUNIT_CASE(u16_u16__u16_overflow_test),
+ KUNIT_CASE(s16_s16__s16_overflow_test),
+ KUNIT_CASE(u32_u32__u32_overflow_test),
+ KUNIT_CASE(s32_s32__s32_overflow_test),
+ KUNIT_CASE(u64_u64__u64_overflow_test),
+ KUNIT_CASE(s64_s64__s64_overflow_test),
+ KUNIT_CASE(u32_u32__int_overflow_test),
+ KUNIT_CASE(u32_u32__u8_overflow_test),
+ KUNIT_CASE(u8_u8__int_overflow_test),
+ KUNIT_CASE(int_int__u8_overflow_test),
+ KUNIT_CASE(shift_sane_test),
+ KUNIT_CASE(shift_overflow_test),
+ KUNIT_CASE(shift_truncate_test),
+ KUNIT_CASE(shift_nonsense_test),
+ KUNIT_CASE(overflow_allocation_test),
+ KUNIT_CASE(overflow_size_helpers_test),
+ KUNIT_CASE(overflows_type_test),
+ KUNIT_CASE(same_type_test),
+ KUNIT_CASE(castable_to_type_test),
+ {}
+};
+
+static struct kunit_suite overflow_test_suite = {
+ .name = "overflow",
+ .test_cases = overflow_test_cases,
+};
+
+kunit_test_suite(overflow_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
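For readers less familiar with the helpers exercised above, the following is a minimal, illustrative sketch of how check_mul_overflow(), overflows_type() and castable_to_type() are typically used by callers. The function and variable names are invented for the example and are not part of this patch.

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* Hypothetical example: validate a user-supplied element count. */
static int example_validate(u32 nr_elems, size_t elem_size)
{
	size_t bytes;

	/* check_mul_overflow() returns true if the product wrapped. */
	if (check_mul_overflow(nr_elems, elem_size, &bytes))
		return -EOVERFLOW;

	/* overflows_type() asks whether a value fits a destination type. */
	if (overflows_type(bytes, int))
		return -E2BIG;

	/*
	 * castable_to_type() additionally accepts constant expressions and
	 * checks them against the destination type's range.
	 */
	if (!castable_to_type(S8_MAX, int))
		return -EINVAL;

	return 0;
}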
diff --git a/lib/packing.c b/lib/packing.c
index 50d1e9f2f5a7..3f656167c17e 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/bitrev.h>
static int get_le_offset(int offset)
{
@@ -29,19 +30,6 @@ static int get_reverse_lsw32_offset(int offset, size_t len)
return word_index * 4 + offset;
}
-static u64 bit_reverse(u64 val, unsigned int width)
-{
- u64 new_val = 0;
- unsigned int bit;
- unsigned int i;
-
- for (i = 0; i < width; i++) {
- bit = (val & (1 << i)) != 0;
- new_val |= (bit << (width - i - 1));
- }
- return new_val;
-}
-
static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
int *box_end_bit, u8 *box_mask)
{
@@ -49,7 +37,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
int new_box_start_bit, new_box_end_bit;
*to_write >>= *box_end_bit;
- *to_write = bit_reverse(*to_write, box_bit_width);
+ *to_write = bitrev8(*to_write) >> (8 - box_bit_width);
*to_write <<= *box_end_bit;
new_box_end_bit = box_bit_width - *box_start_bit - 1;
@@ -73,6 +61,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
* @endbit: The index (in logical notation, compensated for quirks) where
* the packed value ends within pbuf. Must be smaller than, or equal
* to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
* @op: If PACK, then uval will be treated as const pointer and copied (packed)
* into pbuf, between startbit and endbit.
* If UNPACK, then pbuf will be treated as const pointer and the logical
@@ -209,5 +198,4 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
}
EXPORT_SYMBOL(packing);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
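The bit_reverse() removal above leans on the fact that, for field widths of at most 8 bits, reversing the low bits of a value is the same as reversing the whole byte with bitrev8() and shifting the result back down. A standalone sketch of that identity (illustrative, not part of the patch):

#include <linux/bitrev.h>
#include <linux/types.h>

/* Reverse the low @width bits of @val, for 1 <= width <= 8. */
static u8 reverse_low_bits(u8 val, unsigned int width)
{
	/*
	 * bitrev8() reverses all 8 bits, which leaves the reversed field
	 * in the top @width bits; shift it back into the low bits.
	 */
	return bitrev8(val) >> (8 - width);
}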
diff --git a/lib/parman.c b/lib/parman.c
index c6e42a8db824..3f8f8d422e62 100644
--- a/lib/parman.c
+++ b/lib/parman.c
@@ -85,7 +85,6 @@ static int parman_shrink(struct parman *parman)
}
static bool parman_prio_used(struct parman_prio *prio)
-
{
return !list_empty(&prio->item_list);
}
@@ -298,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy);
* parman_prio_init - initializes a parman priority chunk
* @parman: parman instance
* @prio: parman prio structure to be initialized
- * @prority: desired priority of the chunk
+ * @priority: desired priority of the chunk
*
* Note: all locking must be provided by the caller.
*
@@ -357,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio,
EXPORT_SYMBOL(parman_item_add);
/**
- * parman_item_del - deletes parman item
+ * parman_item_remove - deletes parman item
* @parman: parman instance
* @prio: parman prio instance to delete the item from
* @item: parman item instance
diff --git a/lib/parser.c b/lib/parser.c
index f5b3e5d7a7f9..f4eafb9d74e6 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -6,12 +6,22 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>
+/*
+ * max size needed by different bases to express U64
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * pick the largest one, plus a byte for the terminating NUL, to define NUMBER_BUF_LEN
+ */
+#define NUMBER_BUF_LEN 24
+
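A quick way to confirm the sizes quoted in the comment above; this fragment is illustrative only and not part of the patch (kernel snprintf() returns the formatted length, excluding the NUL):

static void number_buf_len_demo(void)
{
	char tmp[NUMBER_BUF_LEN];
	int hex, dec, oct;

	hex = snprintf(tmp, sizeof(tmp), "%#llx", (unsigned long long)U64_MAX); /* 18 */
	dec = snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long)U64_MAX);  /* 20 */
	oct = snprintf(tmp, sizeof(tmp), "%#llo", (unsigned long long)U64_MAX); /* 23 */

	/* The largest (23) plus the terminating NUL fits in 24 bytes. */
	pr_debug("hex=%d dec=%d oct=%d\n", hex, dec, oct);
}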
/**
- * match_one: - Determines if a string matches a simple pattern
+ * match_one - Determines if a string matches a simple pattern
* @s: the string to examine for presence of the pattern
* @p: the string containing the pattern
* @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
@@ -89,7 +99,7 @@ static int match_one(char *s, const char *p, substring_t args[])
}
/**
- * match_token: - Find a token (and optional args) in a string
+ * match_token - Find a token (and optional args) in a string
* @s: the string to examine for token/argument pairs
* @table: match_table_t describing the set of allowed option tokens and the
* arguments that may be associated with them. Must be terminated with a
@@ -98,7 +108,7 @@ static int match_one(char *s, const char *p, substring_t args[])
* locations.
*
* Description: Detects which if any of a set of token strings has been passed
- * to it. Tokens can include up to MAX_OPT_ARGS instances of basic c-style
+ * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic c-style
* format identifiers which will be taken into account when matching the
* tokens, and whose locations will be returned in the @args array.
*/
@@ -114,26 +124,26 @@ int match_token(char *s, const match_table_t table, substring_t args[])
EXPORT_SYMBOL(match_token);
/**
- * match_number: scan a number in the given base from a substring_t
+ * match_number - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting integer on success
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
- * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
static int match_number(substring_t *s, int *result, int base)
{
char *endp;
- char *buf;
+ char buf[NUMBER_BUF_LEN];
int ret;
long val;
- buf = match_strdup(s);
- if (!buf)
- return -ENOMEM;
-
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
ret = 0;
val = simple_strtol(buf, &endp, base);
if (endp == buf)
@@ -142,45 +152,44 @@ static int match_number(substring_t *s, int *result, int base)
ret = -ERANGE;
else
*result = (int) val;
- kfree(buf);
return ret;
}
/**
- * match_u64int: scan a number in the given base from a substring_t
+ * match_u64int - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting u64 on success
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
- * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
static int match_u64int(substring_t *s, u64 *result, int base)
{
- char *buf;
+ char buf[NUMBER_BUF_LEN];
int ret;
u64 val;
- buf = match_strdup(s);
- if (!buf)
- return -ENOMEM;
-
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
ret = kstrtoull(buf, base, &val);
if (!ret)
*result = val;
- kfree(buf);
return ret;
}
/**
- * match_int: - scan a decimal representation of an integer from a substring_t
+ * match_int - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
- * Description: Attempts to parse the &substring_t @s as a decimal integer. On
- * success, sets @result to the integer represented by the string and returns 0.
- * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
int match_int(substring_t *s, int *result)
{
@@ -189,15 +198,37 @@ int match_int(substring_t *s, int *result)
EXPORT_SYMBOL(match_int);
/**
- * match_u64: - scan a decimal representation of a u64 from
+ * match_uint - scan a decimal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_uint(substring_t *s, unsigned int *result)
+{
+ char buf[NUMBER_BUF_LEN];
+
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
+
+ return kstrtouint(buf, 10, result);
+}
+EXPORT_SYMBOL(match_uint);
+
+/**
+ * match_u64 - scan a decimal representation of a u64 from
* a substring_t
* @s: substring_t to be scanned
* @result: resulting unsigned long long on success
*
* Description: Attempts to parse the &substring_t @s as a long decimal
- * integer. On success, sets @result to the integer represented by the
- * string and returns 0.
- * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
int match_u64(substring_t *s, u64 *result)
{
@@ -206,13 +237,14 @@ int match_u64(substring_t *s, u64 *result)
EXPORT_SYMBOL(match_u64);
/**
- * match_octal: - scan an octal representation of an integer from a substring_t
+ * match_octal - scan an octal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
- * Description: Attempts to parse the &substring_t @s as an octal integer. On
- * success, sets @result to the integer represented by the string and returns
- * 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * Description: Attempts to parse the &substring_t @s as an octal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
int match_octal(substring_t *s, int *result)
{
@@ -221,13 +253,14 @@ int match_octal(substring_t *s, int *result)
EXPORT_SYMBOL(match_octal);
/**
- * match_hex: - scan a hex representation of an integer from a substring_t
+ * match_hex - scan a hex representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
* Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
- * On success, sets @result to the integer represented by the string and
- * returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
*/
int match_hex(substring_t *s, int *result)
{
@@ -236,15 +269,16 @@ int match_hex(substring_t *s, int *result)
EXPORT_SYMBOL(match_hex);
/**
- * match_wildcard: - parse if a string matches given wildcard pattern
+ * match_wildcard - parse if a string matches given wildcard pattern
* @pattern: wildcard pattern
* @str: the string to be parsed
*
* Description: Parse the string @str to check if matches wildcard
- * pattern @pattern. The pattern may contain two type wildcardes:
+ * pattern @pattern. The pattern may contain two types of wildcards:
* '*' - matches zero or more characters
* '?' - matches one character
- * If it's matched, return true, else return false.
+ *
+ * Return: If the @str matches the @pattern, return true, else return false.
*/
bool match_wildcard(const char *pattern, const char *str)
{
@@ -287,14 +321,16 @@ bool match_wildcard(const char *pattern, const char *str)
EXPORT_SYMBOL(match_wildcard);
/**
- * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * match_strlcpy - Copy the characters from a substring_t to a sized buffer
* @dest: where to copy to
* @src: &substring_t to copy
* @size: size of destination buffer
*
* Description: Copy the characters in &substring_t @src to the
* c-style string @dest. Copy no more than @size - 1 characters, plus
- * the terminating NUL. Return length of @src.
+ * the terminating NUL.
+ *
+ * Return: length of @src.
*/
size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
@@ -310,12 +346,15 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
EXPORT_SYMBOL(match_strlcpy);
/**
- * match_strdup: - allocate a new string with the contents of a substring_t
+ * match_strdup - allocate a new string with the contents of a substring_t
* @s: &substring_t to copy
*
* Description: Allocates and returns a string filled with the contents of
* the &substring_t @s. The caller is responsible for freeing the returned
* string with kfree().
+ *
+ * Return: the address of the newly allocated NUL-terminated string or
+ * %NULL on error.
*/
char *match_strdup(const substring_t *s)
{
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 2d3eb1cb73b8..ce39ce9f3526 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
#endif /* CONFIG_PCI */
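For context, the usual driver-side pairing looks like the sketch below (bar index and names are placeholders). The default pci_iounmap() added above keeps this pattern working on architectures without a private implementation by calling iounmap() only for addresses outside the fixed ioport window.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical probe fragment: map BAR 0 and release it again later. */
static int example_map_bar0(struct pci_dev *pdev, void __iomem **regs)
{
	*regs = pci_iomap(pdev, 0, 0);	/* maxlen of 0 maps the whole BAR */
	if (!*regs)
		return -ENOMEM;
	return 0;
}

static void example_unmap_bar0(struct pci_dev *pdev, void __iomem *regs)
{
	pci_iounmap(pdev, regs);	/* works for MMIO and I/O port BARs */
}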
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..668f6aa6a75d 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/percpu-refcount.h>
/*
@@ -50,9 +52,10 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
- * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1). See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
@@ -63,32 +66,57 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
__alignof__(unsigned long));
unsigned long start_count = 0;
+ struct percpu_ref_data *data;
ref->percpu_count_ptr = (unsigned long)
__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
if (!ref->percpu_count_ptr)
return -ENOMEM;
- ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ data = kzalloc(sizeof(*ref->data), gfp);
+ if (!data) {
+ free_percpu((void __percpu *)ref->percpu_count_ptr);
+ ref->percpu_count_ptr = 0;
+ return -ENOMEM;
+ }
+
+ data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
- if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
- else
+ data->allow_reinit = true;
+ } else {
start_count += PERCPU_COUNT_BIAS;
+ }
if (flags & PERCPU_REF_INIT_DEAD)
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
else
start_count++;
- atomic_long_set(&ref->count, start_count);
+ atomic_long_set(&data->count, start_count);
- ref->release = release;
- ref->confirm_switch = NULL;
+ data->release = release;
+ data->confirm_switch = NULL;
+ data->ref = ref;
+ ref->data = data;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
+static void __percpu_ref_exit(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+
+ if (percpu_count) {
+ /* non-NULL confirm_switch indicates switching in progress */
+ WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
+ }
+}
+
/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
@@ -101,41 +129,56 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ struct percpu_ref_data *data = ref->data;
+ unsigned long flags;
- if (percpu_count) {
- /* non-NULL confirm_switch indicates switching in progress */
- WARN_ON_ONCE(ref->confirm_switch);
- free_percpu(percpu_count);
- ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
- }
+ __percpu_ref_exit(ref);
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
+ __PERCPU_REF_FLAG_BITS;
+ ref->data = NULL;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
- struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
- ref->confirm_switch(ref);
- ref->confirm_switch = NULL;
+ data->confirm_switch(ref);
+ data->confirm_switch = NULL;
wake_up_all(&percpu_ref_switch_waitq);
+ if (!data->allow_reinit)
+ __percpu_ref_exit(ref);
+
/* drop ref from percpu_ref_switch_to_atomic() */
percpu_ref_put(ref);
}
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
- struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ static atomic_t underflows;
unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %ld percpu %ld",
- atomic_long_read(&ref->count), (long)count);
+ pr_debug("global %lu percpu %lu\n",
+ atomic_long_read(&data->count), count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -149,11 +192,15 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
- atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
-
- WARN_ONCE(atomic_long_read(&ref->count) <= 0,
- "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
- ref->release, atomic_long_read(&ref->count));
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
+
+ if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
+ "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+ data->release, atomic_long_read(&data->count)) &&
+ atomic_inc_return(&underflows) < 4) {
+ pr_err("%s(): percpu_ref underflow", __func__);
+ mem_dump_obj(data);
+ }
/* @ref is viewed as dead on all CPUs, send out switch confirmation */
percpu_ref_call_confirm_rcu(rcu);
@@ -179,10 +226,12 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
* Non-NULL ->confirm_switch is used to indicate that switching is
* in progress. Use noop one if unspecified.
*/
- ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+ ref->data->confirm_switch = confirm_switch ?:
+ percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu_hurry(&ref->data->rcu,
+ percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -195,7 +244,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
- atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+ if (WARN_ON_ONCE(!ref->data->allow_reinit))
+ return;
+
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
/*
* Restore per-cpu operation. smp_store_release() is paired
@@ -213,6 +265,8 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
+ struct percpu_ref_data *data = ref->data;
+
lockdep_assert_held(&percpu_ref_switch_lock);
/*
@@ -220,10 +274,10 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
* its completion. If the caller ensures that ATOMIC switching
* isn't in progress, this function can be called from any context.
*/
- wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+ wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
- if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ if (data->force_atomic || percpu_ref_is_dying(ref))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
@@ -256,7 +310,7 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- ref->force_atomic = true;
+ ref->data->force_atomic = true;
__percpu_ref_switch_mode(ref, confirm_switch);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -274,7 +328,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
percpu_ref_switch_to_atomic(ref, NULL);
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
@@ -302,7 +356,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- ref->force_atomic = false;
+ ref->data->force_atomic = false;
__percpu_ref_switch_mode(ref, NULL);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -333,8 +387,9 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
- "%s called more than once on %ps!", __func__, ref->release);
+ WARN_ONCE(percpu_ref_is_dying(ref),
+ "%s called more than once on %ps!", __func__,
+ ref->data->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
__percpu_ref_switch_mode(ref, confirm_kill);
@@ -345,6 +400,34 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ unsigned long count, flags;
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ return false;
+
+ /* protect us from being destroyed */
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ if (ref->data)
+ count = atomic_long_read(&ref->data->count);
+ else
+ count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ return count == 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
+
+/**
* percpu_ref_reinit - re-initialize a percpu refcount
* @ref: perpcu_ref to re-initialize
*
@@ -384,7 +467,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(!percpu_ref_is_dying(ref));
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
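To put the new ->data indirection in context, the external percpu_ref lifecycle is unchanged; a minimal, hypothetical user still looks like this (names invented for the sketch):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct example_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void example_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct example_obj, ref));
}

static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Starts in percpu mode with a refcount of 1. */
	if (percpu_ref_init(&obj->ref, example_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

static void example_teardown(struct example_obj *obj)
{
	/*
	 * Switch to atomic mode and drop the initial reference;
	 * example_release() runs once all remaining references are put.
	 */
	percpu_ref_kill(&obj->ref);
}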
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..44dd133594d4 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -17,7 +17,7 @@ static DEFINE_SPINLOCK(percpu_counters_lock);
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
-static struct debug_obj_descr percpu_counter_debug_descr;
+static const struct debug_obj_descr percpu_counter_debug_descr;
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
@@ -33,7 +33,7 @@ static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
}
}
-static struct debug_obj_descr percpu_counter_debug_descr = {
+static const struct debug_obj_descr percpu_counter_debug_descr = {
.name = "percpu_counter",
.fixup_free = percpu_counter_fixup_free,
};
@@ -72,35 +72,67 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
-/**
- * This function is both preempt and irq safe. The former is due to explicit
- * preemption disable. The latter is guaranteed by the fact that the slow path
- * is explicitly protected by an irq-safe spinlock whereas the fast patch uses
- * this_cpu_add which is irq-safe by definition. Hence there is no need muck
- * with irq state before calling this one
+/*
+ * local_irq_save() is needed to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ * But:
+ * The decision slow path/fast path and the actual update must be atomic, too.
+ * Otherwise a call in process context could check the current values and
+ * decide that the fast path can be used. If now an interrupt occurs before
+ * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
+ * then the this_cpu_add() that is executed after the interrupt has completed
+ * can produce values larger than "batch" or even overflows.
*/
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
+ unsigned long flags;
- preempt_disable();
+ local_irq_save(flags);
count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
- unsigned long flags;
- raw_spin_lock_irqsave(&fbc->lock, flags);
+ if (abs(count) >= batch) {
+ raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count - amount);
- raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock(&fbc->lock);
} else {
this_cpu_add(*fbc->counters, amount);
}
- preempt_enable();
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(percpu_counter_add_batch);
/*
+ * For a percpu_counter with a big batch, the deviation of its count can
+ * be large, and there may be a requirement to reduce that deviation, e.g.
+ * when the counter's batch is decreased at runtime to get better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+ unsigned long flags;
+ s64 count;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
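A hedged sketch of the usage pattern described in the comment above, flushing every CPU's local delta after a counter's batch has been reduced; the wrapper and the on_each_cpu() call are illustrative and not part of this patch.

#include <linux/percpu_counter.h>
#include <linux/smp.h>

static void example_sync_one(void *info)
{
	percpu_counter_sync(info);
}

/* Fold every CPU's local delta into fbc->count for better accuracy. */
static void example_sync_all(struct percpu_counter *fbc)
{
	on_each_cpu(example_sync_one, fbc, 1);
}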
+/*
* Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
+ * but much slower version of percpu_counter_read_positive().
+ *
+ * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
+ * from CPUs that are in the process of being taken offline. Dying cpus have
+ * been removed from the online mask, but may not have had the hotplug dead
+ * notifier called to fold the percpu count back into the global counter sum.
+ * By including dying CPUs in the iteration mask, we avoid this race condition
+ * so __percpu_counter_sum() just does the right thing when CPUs are being taken
+ * offline.
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
@@ -110,7 +142,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
- for_each_online_cpu(cpu) {
+ for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
@@ -119,48 +151,72 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key)
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
-
- raw_spin_lock_init(&fbc->lock);
- lockdep_set_class(&fbc->lock, key);
- fbc->count = amount;
- fbc->counters = alloc_percpu_gfp(s32, gfp);
- if (!fbc->counters)
+ size_t counter_size;
+ s32 __percpu *counters;
+ u32 i;
+
+ counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
+ counters = __alloc_percpu_gfp(nr_counters * counter_size,
+ __alignof__(*counters), gfp);
+ if (!counters) {
+ fbc[0].counters = NULL;
return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_counters; i++) {
+ raw_spin_lock_init(&fbc[i].lock);
+ lockdep_set_class(&fbc[i].lock, key);
+#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc[i].list);
+#endif
+ fbc[i].count = amount;
+ fbc[i].counters = (void *)counters + (i * counter_size);
- debug_percpu_counter_activate(fbc);
+ debug_percpu_counter_activate(&fbc[i]);
+ }
#ifdef CONFIG_HOTPLUG_CPU
- INIT_LIST_HEAD(&fbc->list);
spin_lock_irqsave(&percpu_counters_lock, flags);
- list_add(&fbc->list, &percpu_counters);
+ for (i = 0; i < nr_counters; i++)
+ list_add(&fbc[i].list, &percpu_counters);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
-EXPORT_SYMBOL(__percpu_counter_init);
+EXPORT_SYMBOL(__percpu_counter_init_many);
-void percpu_counter_destroy(struct percpu_counter *fbc)
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
{
unsigned long flags __maybe_unused;
+ u32 i;
- if (!fbc->counters)
+ if (WARN_ON_ONCE(!fbc))
return;
- debug_percpu_counter_deactivate(fbc);
+ if (!fbc[0].counters)
+ return;
+
+ for (i = 0; i < nr_counters; i++)
+ debug_percpu_counter_deactivate(&fbc[i]);
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
- list_del(&fbc->list);
+ for (i = 0; i < nr_counters; i++)
+ list_del(&fbc[i].list);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
- free_percpu(fbc->counters);
- fbc->counters = NULL;
+
+ free_percpu(fbc[0].counters);
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].counters = NULL;
}
-EXPORT_SYMBOL(percpu_counter_destroy);
+EXPORT_SYMBOL(percpu_counter_destroy_many);
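The _many variants introduced above back an array of counters with a single percpu allocation. A minimal sketch of the new calling convention, using the exported helper directly (in practice a header wrapper would typically supply the lock class key):

#include <linux/percpu_counter.h>

#define EXAMPLE_NR_COUNTERS	4

static struct percpu_counter example_counters[EXAMPLE_NR_COUNTERS];

static int example_counters_init(void)
{
	static struct lock_class_key example_key;

	/* One percpu allocation backs all four counters. */
	return __percpu_counter_init_many(example_counters, 0, GFP_KERNEL,
					  EXAMPLE_NR_COUNTERS, &example_key);
}

static void example_counters_exit(void)
{
	percpu_counter_destroy_many(example_counters, EXAMPLE_NR_COUNTERS);
}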
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
@@ -222,6 +278,85 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
}
EXPORT_SYMBOL(__percpu_counter_compare);
+/*
+ * Compare counter, and add amount if total is: less than or equal to limit if
+ * amount is positive, or greater than or equal to limit if amount is negative.
+ * Return true if amount is added, or false if total would be beyond the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
+ */
+bool __percpu_counter_limited_add(struct percpu_counter *fbc,
+ s64 limit, s64 amount, s32 batch)
+{
+ s64 count;
+ s64 unknown;
+ unsigned long flags;
+ bool good = false;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ unknown = batch * num_online_cpus();
+ count = __this_cpu_read(*fbc->counters);
+
+ /* Skip taking the lock when safe */
+ if (abs(count + amount) <= batch &&
+ ((amount > 0 && fbc->count + unknown <= limit) ||
+ (amount < 0 && fbc->count - unknown >= limit))) {
+ this_cpu_add(*fbc->counters, amount);
+ local_irq_restore(flags);
+ return true;
+ }
+
+ raw_spin_lock(&fbc->lock);
+ count = fbc->count + amount;
+
+ /* Skip percpu_counter_sum() when safe */
+ if (amount > 0) {
+ if (count - unknown > limit)
+ goto out;
+ if (count + unknown <= limit)
+ good = true;
+ } else {
+ if (count + unknown < limit)
+ goto out;
+ if (count - unknown >= limit)
+ good = true;
+ }
+
+ if (!good) {
+ s32 *pcount;
+ int cpu;
+
+ for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count += *pcount;
+ }
+ if (amount > 0) {
+ if (count > limit)
+ goto out;
+ } else {
+ if (count < limit)
+ goto out;
+ }
+ good = true;
+ }
+
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count + amount;
+ __this_cpu_sub(*fbc->counters, count);
+out:
+ raw_spin_unlock(&fbc->lock);
+ local_irq_restore(flags);
+ return good;
+}
+
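A sketch of how the new limited-add helper is intended to be called, here enforcing a hypothetical upper bound; the wrapper name and batch choice are illustrative, and only the __percpu_counter_limited_add() signature is taken from the code above.

#include <linux/percpu_counter.h>
#include <linux/types.h>

/*
 * Try to account @nr_pages against @used without exceeding @max.
 * Returns true and charges the pages, or false and charges nothing.
 */
static bool example_charge(struct percpu_counter *used, s64 max, s64 nr_pages)
{
	return __percpu_counter_limited_add(used, max, nr_pages,
					    percpu_counter_batch);
}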
static int __init percpu_counter_startup(void)
{
int ret;
diff --git a/lib/pldmfw/Makefile b/lib/pldmfw/Makefile
new file mode 100644
index 000000000000..99ad10711abe
--- /dev/null
+++ b/lib/pldmfw/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLDMFW) += pldmfw.o
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
new file mode 100644
index 000000000000..54e1809a38fd
--- /dev/null
+++ b/lib/pldmfw/pldmfw.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pldmfw.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include "pldmfw_private.h"
+
+/* Internal structure used to store details about the PLDM image file as it is
+ * being validated and processed.
+ */
+struct pldmfw_priv {
+ struct pldmfw *context;
+ const struct firmware *fw;
+
+ /* current offset of firmware image */
+ size_t offset;
+
+ struct list_head records;
+ struct list_head components;
+
+ /* PLDM Firmware Package Header */
+ const struct __pldm_header *header;
+ u16 total_header_size;
+
+ /* length of the component bitmap */
+ u16 component_bitmap_len;
+ u16 bitmap_size;
+
+ /* Start of the component image information */
+ u16 component_count;
+ const u8 *component_start;
+
+ /* Start of the firmware device id records */
+ const u8 *record_start;
+ u8 record_count;
+
+ /* The CRC at the end of the package header */
+ u32 header_crc;
+
+ struct pldmfw_record *matching_record;
+};
+
+/**
+ * pldm_check_fw_space - Verify that the firmware image has space left
+ * @data: pointer to private data
+ * @offset: offset to start from
+ * @length: length to check for
+ *
+ * Verify that the firmware data can hold a chunk of bytes with the specified
+ * offset and length.
+ *
+ * Returns: zero on success, or -EFAULT if the image does not have enough
+ * space left to fit the expected length.
+ */
+static int
+pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
+{
+ size_t expected_size = offset + length;
+ struct device *dev = data->context->dev;
+
+ if (data->fw->size < expected_size) {
+ dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n",
+ data->fw->size, expected_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_move_fw_offset - Move the current firmware offset forward
+ * @data: pointer to private data
+ * @bytes_to_move: number of bytes to move the offset forward by
+ *
+ * Check that there is enough space past the current offset, and then move the
+ * offset forward by this amount.
+ *
+ * Returns: zero on success, or -EFAULT if the image is too small to fit the
+ * expected length.
+ */
+static int
+pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move)
+{
+ int err;
+
+ err = pldm_check_fw_space(data, data->offset, bytes_to_move);
+ if (err)
+ return err;
+
+ data->offset += bytes_to_move;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_header - Validate and extract details about the PLDM header
+ * @data: pointer to private data
+ *
+ * Performs initial basic verification of the PLDM image, up to the first
+ * firmware record.
+ *
+ * This includes the following checks and extractions
+ *
+ * * Verify that the UUID at the start of the header matches the expected
+ * value as defined in the DSP0267 PLDM specification
+ * * Check that the revision is 0x01
+ * * Extract the total header_size and verify that the image is large enough
+ * to contain at least the length of this header
+ * * Extract the size of the component bitmap length
+ * * Extract a pointer to the start of the record area
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_header(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_record_area *record_area;
+ struct device *dev = data->context->dev;
+ const struct __pldm_header *header;
+ size_t header_size;
+ int err;
+
+ err = pldm_move_fw_offset(data, sizeof(*header));
+ if (err)
+ return err;
+
+ header = (const struct __pldm_header *)data->fw->data;
+ data->header = header;
+
+ if (!uuid_equal(&header->id, &pldm_firmware_header_id)) {
+ dev_dbg(dev, "Invalid package header identifier. Expected UUID %pUB, but got %pUB\n",
+ &pldm_firmware_header_id, &header->id);
+ return -EINVAL;
+ }
+
+ if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) {
+ dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n",
+ PACKAGE_HEADER_FORMAT_REVISION, header->revision);
+ return -EOPNOTSUPP;
+ }
+
+ data->total_header_size = get_unaligned_le16(&header->size);
+ header_size = data->total_header_size - sizeof(*header);
+
+ err = pldm_check_fw_space(data, data->offset, header_size);
+ if (err)
+ return err;
+
+ data->component_bitmap_len =
+ get_unaligned_le16(&header->component_bitmap_len);
+
+ if (data->component_bitmap_len % 8 != 0) {
+ dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n",
+ data->component_bitmap_len);
+ return -EINVAL;
+ }
+
+ data->bitmap_size = data->component_bitmap_len / 8;
+
+ err = pldm_move_fw_offset(data, header->version_len);
+ if (err)
+ return err;
+
+ /* extract a pointer to the record area, which just follows the main
+ * PLDM header data.
+ */
+ record_area = (const struct __pldmfw_record_area *)(data->fw->data +
+ data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*record_area));
+ if (err)
+ return err;
+
+ data->record_count = record_area->record_count;
+ data->record_start = record_area->records;
+
+ return 0;
+}
+
+/**
+ * pldm_check_desc_tlv_len - Check that the length matches expectation
+ * @data: pointer to image details
+ * @type: the descriptor type
+ * @size: the length from the descriptor header
+ *
+ * If the descriptor type is one of the documented descriptor types according
+ * to the standard, verify that the provided length matches.
+ *
+ * If the type is not recognized or is VENDOR_DEFINED, return zero.
+ *
+ * Returns: zero on success, or -EINVAL if the specified size of a standard
+ * TLV does not match the expected value defined for that TLV.
+ */
+static int
+pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size)
+{
+ struct device *dev = data->context->dev;
+ u16 expected_size;
+
+ switch (type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ expected_size = 2;
+ break;
+ case PLDM_DESC_ID_PCI_REVISION_ID:
+ expected_size = 1;
+ break;
+ case PLDM_DESC_ID_PNP_VENDOR_ID:
+ expected_size = 3;
+ break;
+ case PLDM_DESC_ID_IANA_ENTERPRISE_ID:
+ case PLDM_DESC_ID_ACPI_VENDOR_ID:
+ case PLDM_DESC_ID_PNP_PRODUCT_ID:
+ case PLDM_DESC_ID_ACPI_PRODUCT_ID:
+ expected_size = 4;
+ break;
+ case PLDM_DESC_ID_UUID:
+ expected_size = 16;
+ break;
+ case PLDM_DESC_ID_VENDOR_DEFINED:
+ return 0;
+ default:
+ /* Do not report an error on an unexpected TLV */
+ dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type);
+ return 0;
+ }
+
+ if (size != expected_size) {
+ dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. Got %u bytes, but expected %u bytes\n",
+ type, size, expected_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_desc_tlvs - Check and skip past a number of TLVs
+ * @data: pointer to private data
+ * @record: pointer to the record this TLV belongs to
+ * @desc_count: descriptor count
+ *
+ * From the current offset, read and extract the descriptor TLVs, updating the
+ * current offset each time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count)
+{
+ const struct __pldmfw_desc_tlv *__desc;
+ const u8 *desc_start;
+ u8 i;
+
+ desc_start = data->fw->data + data->offset;
+
+ pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) {
+ struct pldmfw_desc_tlv *desc;
+ int err;
+ u16 type, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__desc));
+ if (err)
+ return err;
+
+ type = get_unaligned_le16(&__desc->type);
+
+ /* According to DSP0267, this only includes the data field */
+ size = get_unaligned_le16(&__desc->size);
+
+ err = pldm_check_desc_tlv_len(data, type, size);
+ if (err)
+ return err;
+
+ /* check that we have space and move the offset forward */
+ err = pldm_move_fw_offset(data, size);
+ if (err)
+ return err;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = type;
+ desc->size = size;
+ desc->data = __desc->data;
+
+ list_add_tail(&desc->entry, &record->descs);
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_one_record - Verify size of one PLDM record
+ * @data: pointer to image details
+ * @__record: pointer to the record to check
+ *
+ * This function checks that the record size does not exceed either the size
+ * of the firmware file or the total length specified in the header section.
+ *
+ * It also verifies that the recorded length of the start of the record
+ * matches the size calculated by adding the static structure length, the
+ * component bitmap length, the version string length, the length of all
+ * descriptor TLVs, and the length of the package data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_one_record(struct pldmfw_priv *data,
+ const struct __pldmfw_record_info *__record)
+{
+ struct pldmfw_record *record;
+ size_t measured_length;
+ int err;
+ const u8 *bitmap_ptr;
+ u16 record_len;
+ int i;
+
+ /* Make a copy and insert it into the record list */
+ record = kzalloc(sizeof(*record), GFP_KERNEL);
+ if (!record)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&record->descs);
+ list_add_tail(&record->entry, &data->records);
+
+ /* Then check that we have space and move the offset */
+ err = pldm_move_fw_offset(data, sizeof(*__record));
+ if (err)
+ return err;
+
+ record_len = get_unaligned_le16(&__record->record_len);
+ record->package_data_len = get_unaligned_le16(&__record->package_data_len);
+ record->version_len = __record->version_len;
+ record->version_type = __record->version_type;
+
+ bitmap_ptr = data->fw->data + data->offset;
+
+ /* check that we have space for the component bitmap length */
+ err = pldm_move_fw_offset(data, data->bitmap_size);
+ if (err)
+ return err;
+
+ record->component_bitmap_len = data->component_bitmap_len;
+ record->component_bitmap = bitmap_zalloc(record->component_bitmap_len,
+ GFP_KERNEL);
+ if (!record->component_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < data->bitmap_size; i++)
+ bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8);
+
+ record->version_string = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->version_len);
+ if (err)
+ return err;
+
+ /* Scan through the descriptor TLVs and find the end */
+ err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count);
+ if (err)
+ return err;
+
+ record->package_data = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->package_data_len);
+ if (err)
+ return err;
+
+ measured_length = data->offset - ((const u8 *)__record - data->fw->data);
+ if (measured_length != record_len) {
+ dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n",
+ measured_length, record_len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_records - Locate the start of the component area
+ * @data: pointer to private data
+ *
+ * Extract the record count, and loop through each record, searching for the
+ * component area.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_records(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_area *component_area;
+ const struct __pldmfw_record_info *record;
+ int err;
+ u8 i;
+
+ pldm_for_each_record(i, record, data->record_start, data->record_count) {
+ err = pldm_parse_one_record(data, record);
+ if (err)
+ return err;
+ }
+
+ /* Extract a pointer to the component area, which just follows the
+ * PLDM device record data.
+ */
+ component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*component_area));
+ if (err)
+ return err;
+
+ data->component_count =
+ get_unaligned_le16(&component_area->component_image_count);
+ data->component_start = component_area->components;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_components - Locate the CRC header checksum
+ * @data: pointer to private data
+ *
+ * Extract the component count, and find the pointer to the component area.
+ * Scan through each component searching for the end, which should point to
+ * the package header checksum.
+ *
+ * Extract the package header CRC and save it for verification.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_components(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_info *__component;
+ struct device *dev = data->context->dev;
+ const u8 *header_crc_ptr;
+ int err;
+ u8 i;
+
+ pldm_for_each_component(i, __component, data->component_start, data->component_count) {
+ struct pldmfw_component *component;
+ u32 offset, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__component));
+ if (err)
+ return err;
+
+ err = pldm_move_fw_offset(data, __component->version_len);
+ if (err)
+ return err;
+
+ offset = get_unaligned_le32(&__component->location_offset);
+ size = get_unaligned_le32(&__component->size);
+
+ err = pldm_check_fw_space(data, offset, size);
+ if (err)
+ return err;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->index = i;
+ component->classification = get_unaligned_le16(&__component->classification);
+ component->identifier = get_unaligned_le16(&__component->identifier);
+ component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp);
+ component->options = get_unaligned_le16(&__component->options);
+ component->activation_method = get_unaligned_le16(&__component->activation_method);
+ component->version_type = __component->version_type;
+ component->version_len = __component->version_len;
+ component->version_string = __component->version_string;
+ component->component_data = data->fw->data + offset;
+ component->component_size = size;
+
+ list_add_tail(&component->entry, &data->components);
+ }
+
+ header_crc_ptr = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, sizeof(data->header_crc));
+ if (err)
+ return err;
+
+ /* Make sure that we reached the expected offset */
+ if (data->offset != data->total_header_size) {
+ dev_dbg(dev, "Invalid firmware header size. Expected %u but got %zu\n",
+ data->total_header_size, data->offset);
+ return -EFAULT;
+ }
+
+ data->header_crc = get_unaligned_le32(header_crc_ptr);
+
+ return 0;
+}
+
+/**
+ * pldm_verify_header_crc - Verify that the CRC in the header matches
+ * @data: pointer to private data
+ *
+ * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and
+ * compares it to the value stored in the header.
+ *
+ * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC.
+ */
+static int pldm_verify_header_crc(struct pldmfw_priv *data)
+{
+ struct device *dev = data->context->dev;
+ u32 calculated_crc;
+ size_t length;
+
+ /* Calculate the 32-bit CRC of the header contents up to but
+ * not including the checksum. Note that the Linux crc32_le function
+ * does not perform an expected final XOR.
+ */
+ length = data->offset - sizeof(data->header_crc);
+ calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0;
+
+ if (calculated_crc != data->header_crc) {
+ dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n",
+ calculated_crc, data->header_crc);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+/**
+ * pldmfw_free_priv - Free memory allocated while parsing the PLDM image
+ * @data: pointer to the PLDM data structure
+ *
+ * Loops through and clears all allocated memory associated with each
+ * allocated descriptor, record, and component.
+ */
+static void pldmfw_free_priv(struct pldmfw_priv *data)
+{
+ struct pldmfw_component *component, *c_safe;
+ struct pldmfw_record *record, *r_safe;
+ struct pldmfw_desc_tlv *desc, *d_safe;
+
+ list_for_each_entry_safe(component, c_safe, &data->components, entry) {
+ list_del(&component->entry);
+ kfree(component);
+ }
+
+ list_for_each_entry_safe(record, r_safe, &data->records, entry) {
+ list_for_each_entry_safe(desc, d_safe, &record->descs, entry) {
+ list_del(&desc->entry);
+ kfree(desc);
+ }
+
+ if (record->component_bitmap) {
+ bitmap_free(record->component_bitmap);
+ record->component_bitmap = NULL;
+ }
+
+ list_del(&record->entry);
+ kfree(record);
+ }
+}
+
+/**
+ * pldm_parse_image - parse and extract details from PLDM image
+ * @data: pointer to private data
+ *
+ * Verify that the firmware file contains valid data for a PLDM firmware
+ * file. Extract useful pointers and data from the firmware file and store
+ * them in the data structure.
+ *
+ * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care
+ * should be taken to use get_unaligned_le* when accessing data from the
+ * pointers in data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_image(struct pldmfw_priv *data)
+{
+ int err;
+
+ if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size)))
+ return -EINVAL;
+
+ err = pldm_parse_header(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_records(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_components(data);
+ if (err)
+ return err;
+
+ return pldm_verify_header_crc(data);
+}
+
+/* these are int, wider than the u16 fields, so that we can store PCI_ANY_ID */
+struct pldm_pci_record_id {
+ int vendor;
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+};
+
+/**
+ * pldmfw_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: a record extracted from the PLDM image
+ *
+ * Determine whether the PCI device associated with this context matches the
+ * record data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+		/* A value of zero for one of the descriptors is sometimes
+		 * used when the record should ignore this field when matching
+		 * the device, for example when the record applies to any
+		 * subsystem device or vendor.
+ */
+ if (value)
+ *ptr = (int)value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device) &&
+ (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device))
+ return true;
+ else
+ return false;
+}
+EXPORT_SYMBOL(pldmfw_op_pci_match_record);
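+
+/*
+ * Usage sketch (not part of this file): a PCI driver wanting the default
+ * matching behavior would typically just point its ops at this helper, e.g.
+ *
+ *	static const struct pldmfw_ops my_fwu_ops = {
+ *		.match_record = &pldmfw_op_pci_match_record,
+ *		...
+ *	};
+ *
+ * where my_fwu_ops is a hypothetical name chosen by the driver.
+ */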
+
+/**
+ * pldm_find_matching_record - Find the first matching PLDM record
+ * @data: pointer to private data
+ *
+ * Search through PLDM records and find the first matching entry. It is
+ * expected that only one entry matches.
+ *
+ * Store a pointer to the matching record, if found.
+ *
+ * Returns: zero on success, or -ENOENT if no matching record is found.
+ */
+static int pldm_find_matching_record(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record;
+
+ list_for_each_entry(record, &data->records, entry) {
+ if (data->context->ops->match_record(data->context, record)) {
+ data->matching_record = record;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pldm_send_package_data - Send firmware the package data for the record
+ * @data: pointer to private data
+ *
+ * Send the package data associated with the matching record to the firmware,
+ * using the send_pkg_data operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_package_data(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record = data->matching_record;
+ const struct pldmfw_ops *ops = data->context->ops;
+
+ return ops->send_package_data(data->context, record->package_data,
+ record->package_data_len);
+}
+
+/**
+ * pldm_send_component_tables - Send component table information to firmware
+ * @data: pointer to private data
+ *
+ * Loop over each component, sending the applicable components to the firmware
+ * via the send_component_table operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_component_tables(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index, transfer_flag = 0;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ /* determine whether this is the start, middle, end, or both
+ * the start and end of the component tables
+ */
+ if (index == find_first_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_START;
+ if (index == find_last_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_END;
+ if (!transfer_flag)
+ transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE;
+
+ err = data->context->ops->send_component_table(data->context,
+ component,
+ transfer_flag);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_flash_components - Program each component to device flash
+ * @data: pointer to private data
+ *
+ * Loop through each component that is active for the matching device record,
+ * and send it to the device driver for flashing.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_flash_components(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ err = data->context->ops->flash_component(data->context, component);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_finalize_update - Finalize the device flash update
+ * @data: pointer to private data
+ *
+ * Tell the device driver to perform any remaining logic to complete the
+ * device update.
+ *
+ * Returns: zero on success, or a PLDM_FWU error indicating the reason for
+ * failure.
+ */
+static int pldm_finalize_update(struct pldmfw_priv *data)
+{
+ if (data->context->ops->finalize_update)
+ return data->context->ops->finalize_update(data->context);
+
+ return 0;
+}
+
+/**
+ * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device
+ * @context: ops and data for firmware update
+ * @fw: firmware object pointing to the relevant firmware file to program
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the device firmware. Extract and write the flash data for each of the
+ * components indicated in the firmware file.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw)
+{
+ struct pldmfw_priv *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->records);
+ INIT_LIST_HEAD(&data->components);
+
+ data->fw = fw;
+ data->context = context;
+
+ err = pldm_parse_image(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_find_matching_record(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_package_data(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_component_tables(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_flash_components(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_finalize_update(data);
+
+out_release_data:
+ pldmfw_free_priv(data);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL(pldmfw_flash_image);
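+
+/*
+ * Sketch of typical (hypothetical) driver usage of this entry point; the
+ * ops structure and firmware name below are made up for illustration:
+ *
+ *	struct pldmfw context = { .ops = &my_fwu_ops, .dev = &pdev->dev };
+ *	const struct firmware *fw;
+ *	int err;
+ *
+ *	err = request_firmware(&fw, "my_image.bin", &pdev->dev);
+ *	if (!err) {
+ *		err = pldmfw_flash_image(&context, fw);
+ *		release_firmware(fw);
+ *	}
+ */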
+
+MODULE_AUTHOR("Jacob Keller <jacob.e.keller@intel.com>");
+MODULE_DESCRIPTION("PLDM firmware flash update library");
diff --git a/lib/pldmfw/pldmfw_private.h b/lib/pldmfw/pldmfw_private.h
new file mode 100644
index 000000000000..687ef2200692
--- /dev/null
+++ b/lib/pldmfw/pldmfw_private.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_PRIVATE_H_
+#define _PLDMFW_PRIVATE_H_
+
+/* The following data structures define the layout of a firmware binary
+ * following the "PLDM For Firmware Update Specification", DMTF standard
+ * #DSP0267.
+ *
+ * pldmfw.c uses these structures to implement a simple engine that will parse
+ * a fw binary file in this format and perform a firmware update for a given
+ * device.
+ *
+ * Due to the variable sized data layout, alignment of fields within these
+ * structures is not guaranteed when reading. For this reason, all multi-byte
+ * field accesses should be done using the unaligned access macros.
+ * Additionally, the standard specifies that multi-byte fields are in
+ * LittleEndian format.
+ *
+ * The structure definitions are not made public, in order to keep direct
+ * accesses within code that is prepared to deal with the limitation of
+ * unaligned access.
+ */
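+
+/*
+ * For instance (illustrative only), given a struct __pldmfw_record_info
+ * pointer, reading a 16-bit field must go through the unaligned helpers:
+ *
+ *	u16 record_len = get_unaligned_le16(&record->record_len);
+ */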
+
+/* UUID for PLDM firmware packages: f018878c-cb7d-4943-9800-a02f059aca02 */
+static const uuid_t pldm_firmware_header_id =
+ UUID_INIT(0xf018878c, 0xcb7d, 0x4943,
+ 0x98, 0x00, 0xa0, 0x2f, 0x05, 0x9a, 0xca, 0x02);
+
+/* Revision number of the PLDM header format this code supports */
+#define PACKAGE_HEADER_FORMAT_REVISION 0x01
+
+/* timestamp104 structure defined in PLDM Base specification */
+#define PLDM_TIMESTAMP_SIZE 13
+struct __pldm_timestamp {
+ u8 b[PLDM_TIMESTAMP_SIZE];
+} __packed __aligned(1);
+
+/* Package Header Information */
+struct __pldm_header {
+ uuid_t id; /* PackageHeaderIdentifier */
+ u8 revision; /* PackageHeaderFormatRevision */
+ __le16 size; /* PackageHeaderSize */
+ struct __pldm_timestamp release_date; /* PackageReleaseDateTime */
+ __le16 component_bitmap_len; /* ComponentBitmapBitLength */
+ u8 version_type; /* PackageVersionStringType */
+ u8 version_len; /* PackageVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * PackageVersionString, length is version_len.
+ *
+ * The total size of this section is
+ * sizeof(pldm_header) + version_len;
+ */
+ u8 version_string[]; /* PackageVersionString */
+} __packed __aligned(1);
+
+/* Firmware Device ID Record */
+struct __pldmfw_record_info {
+ __le16 record_len; /* RecordLength */
+ u8 descriptor_count; /* DescriptorCount */
+ __le32 device_update_flags; /* DeviceUpdateOptionFlags */
+ u8 version_type; /* ComponentImageSetVersionType */
+ u8 version_len; /* ComponentImageSetVersionLength */
+ __le16 package_data_len; /* FirmwareDevicePackageDataLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ApplicableComponents, length is component_bitmap_len from header
+ * ComponentImageSetVersionString, length is version_len
+ * RecordDescriptors, a series of TLVs with 16bit type and length
+ * FirmwareDevicePackageData, length is package_data_len
+ *
+ * The total size of each record is
+ * sizeof(pldmfw_record_info) +
+ * component_bitmap_len (converted to bytes!) +
+ * version_len +
+ * <length of RecordDescriptors> +
+ * package_data_len
+ */
+ u8 variable_record_data[];
+} __packed __aligned(1);
+
+/* Firmware Descriptor Definition */
+struct __pldmfw_desc_tlv {
+ __le16 type; /* DescriptorType */
+ __le16 size; /* DescriptorSize */
+ u8 data[]; /* DescriptorData */
+} __aligned(1);
+
+/* Firmware Device Identification Area */
+struct __pldmfw_record_area {
+ u8 record_count; /* DeviceIDRecordCount */
+ /* This is not a struct type because the size of each record varies */
+ u8 records[];
+} __aligned(1);
+
+/* Individual Component Image Information */
+struct __pldmfw_component_info {
+	__le16 classification;	/* ComponentClassification */
+ __le16 identifier; /* ComponentIdentifier */
+ __le32 comparison_stamp; /* ComponentComparisonStamp */
+	__le16 options;		/* ComponentOptions */
+ __le16 activation_method; /* RequestedComponentActivationMethod */
+ __le32 location_offset; /* ComponentLocationOffset */
+ __le32 size; /* ComponentSize */
+ u8 version_type; /* ComponentVersionStringType */
+ u8 version_len; /* ComponentVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ComponentVersionString, length is version_len
+ *
+ * The total size of this section is
+ * sizeof(pldmfw_component_info) + version_len;
+ */
+ u8 version_string[]; /* ComponentVersionString */
+} __packed __aligned(1);
+
+/* Component Image Information Area */
+struct __pldmfw_component_area {
+ __le16 component_image_count;
+ /* This is not a struct type because the component size varies */
+ u8 components[];
+} __aligned(1);
+
+/**
+ * pldm_first_desc_tlv
+ * @start: byte offset of the start of the descriptor TLVs
+ *
+ * Converts the starting offset of the descriptor TLVs into a pointer to the
+ * first descriptor.
+ */
+#define pldm_first_desc_tlv(start) \
+ ((const struct __pldmfw_desc_tlv *)(start))
+
+/**
+ * pldm_next_desc_tlv
+ * @desc: pointer to a descriptor TLV
+ *
+ * Finds the pointer to the next descriptor following a given descriptor
+ */
+#define pldm_next_desc_tlv(desc) \
+ ((const struct __pldmfw_desc_tlv *)((desc)->data + \
+ get_unaligned_le16(&(desc)->size)))
+
+/**
+ * pldm_for_each_desc_tlv
+ * @i: variable to store descriptor index
+ * @desc: variable to store descriptor pointer
+ * @start: byte offset of the start of the descriptors
+ * @count: the number of descriptors
+ *
+ * for loop macro to iterate over all of the descriptors of a given PLDM
+ * record.
+ */
+#define pldm_for_each_desc_tlv(i, desc, start, count) \
+ for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \
+ (i) < (count); \
+ (i)++, (desc) = pldm_next_desc_tlv(desc))
+
+/**
+ * pldm_first_record
+ * @start: byte offset of the start of the PLDM records
+ *
+ * Converts a starting offset of the PLDM records into a pointer to the first
+ * record.
+ */
+#define pldm_first_record(start) \
+ ((const struct __pldmfw_record_info *)(start))
+
+/**
+ * pldm_next_record
+ * @record: pointer to a PLDM record
+ *
+ * Finds a pointer to the next record following a given record
+ */
+#define pldm_next_record(record) \
+ ((const struct __pldmfw_record_info *) \
+ ((const u8 *)(record) + get_unaligned_le16(&(record)->record_len)))
+
+/**
+ * pldm_for_each_record
+ * @i: variable to store record index
+ * @record: variable to store record pointer
+ * @start: byte offset of the start of the records
+ * @count: the number of records
+ *
+ * for loop macro to iterate over all of the records of a PLDM file.
+ */
+#define pldm_for_each_record(i, record, start, count) \
+ for ((i) = 0, (record) = pldm_first_record(start); \
+ (i) < (count); \
+ (i)++, (record) = pldm_next_record(record))
+
+/**
+ * pldm_first_component
+ * @start: byte offset of the start of the PLDM components
+ *
+ * Convert a starting offset of the PLDM components into a pointer to the
+ * first component
+ */
+#define pldm_first_component(start) \
+ ((const struct __pldmfw_component_info *)(start))
+
+/**
+ * pldm_next_component
+ * @component: pointer to a PLDM component
+ *
+ * Finds a pointer to the next component following a given component
+ */
+#define pldm_next_component(component) \
+ ((const struct __pldmfw_component_info *)((component)->version_string + \
+ (component)->version_len))
+
+/**
+ * pldm_for_each_component
+ * @i: variable to store component index
+ * @component: variable to store component pointer
+ * @start: byte offset to the start of the first component
+ * @count: the number of components
+ *
+ * for loop macro to iterate over all of the components of a PLDM file.
+ */
+#define pldm_for_each_component(i, component, start, count) \
+ for ((i) = 0, (component) = pldm_first_component(start); \
+ (i) < (count); \
+ (i)++, (component) = pldm_next_component(component))
+
+#endif
diff --git a/lib/polynomial.c b/lib/polynomial.c
new file mode 100644
index 000000000000..66d383445fec
--- /dev/null
+++ b/lib/polynomial.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic polynomial calculation using integer coefficients.
+ *
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/polynomial.h>
+
+/*
+ * Originally this was part of drivers/hwmon/bt1-pvt.c.
+ * There the following conversion is used and should serve as an example here:
+ *
+ * The original translation formulae of the temperature (in degrees of Celsius)
+ * to PVT data and vice-versa are following:
+ *
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1
+ *
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ *
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are ordered so that the intermediate results
+ * stay within the integer limits. In addition we need to translate the
+ * formulae to accept millidegrees of Celsius. Here is what they look like
+ * after the alterations:
+ *
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4
+ * T = -16743e-12*(N^4) + 81542e-9*(N^3) - 182010e-6*(N^2) + 310200e-3*N -
+ * 48380
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ *
+ * static const struct polynomial poly_temp_to_N = {
+ * .total_divider = 10000,
+ * .terms = {
+ * {4, 18322, 10000, 10000},
+ * {3, 2343, 10000, 10},
+ * {2, 87018, 10000, 10},
+ * {1, 39269, 1000, 1},
+ * {0, 1720400, 1, 1}
+ * }
+ * };
+ *
+ * static const struct polynomial poly_N_to_temp = {
+ * .total_divider = 1,
+ * .terms = {
+ * {4, -16743, 1000, 1},
+ * {3, 81542, 1000, 1},
+ * {2, -182010, 1000, 1},
+ * {1, 310200, 1000, 1},
+ * {0, -48380, 1, 1}
+ * }
+ * };
+ */
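+
+/*
+ * With the example polynomials above, a (purely illustrative) conversion of
+ * 25000 millidegrees Celsius to PVT data would then simply be:
+ *
+ *	data = polynomial_calc(&poly_temp_to_N, 25000);
+ */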
+
+/**
+ * polynomial_calc - calculate a polynomial using integer arithmetic
+ *
+ * @poly: pointer to the descriptor of the polynomial
+ * @data: input value of the polynomial
+ *
+ * Calculate the result of a polynomial using only integer arithmetic. For
+ * this to work without too much loss of precision the coefficients have to
+ * be altered. This is called factor redistribution.
+ *
+ * Returns the result of the polynomial calculation.
+ */
+long polynomial_calc(const struct polynomial *poly, long data)
+{
+ const struct polynomial_term *term = poly->terms;
+ long total_divider = poly->total_divider ?: 1;
+ long tmp, ret = 0;
+ int deg;
+
+ /*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed terms calculations. It's pretty straightforward.
+ * We walk over each degree term up to the free one, and perform
+ * the redistributed multiplication of the term coefficient, its
+	 * divider (as for the rational fraction representation), data
+	 * power and the rational fraction divider leftover. Then all of
+	 * this is collected in a total sum variable, whose value is
+	 * normalized by the total divider before being returned.
+ */
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / total_divider;
+}
+EXPORT_SYMBOL_GPL(polynomial_calc);
+
+MODULE_DESCRIPTION("Generic polynomial calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 18c1dfbb1765..976b9bd02a1b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -27,6 +27,7 @@
#include <linux/string.h>
#include <linux/xarray.h>
+#include "radix-tree.h"
/*
* Radix tree node cache.
@@ -56,22 +57,12 @@ struct kmem_cache *radix_tree_node_cachep;
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-
-/*
* Per-cpu pool of preloaded nodes
*/
-struct radix_tree_preload {
- unsigned nr;
- /* nodes->parent points to next preallocated node */
- struct radix_tree_node *nodes;
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+ .lock = INIT_LOCAL_LOCK(lock),
};
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
@@ -177,9 +168,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
- * @addr: The address to base the search on
- * @size: The bitmap size in bits
- * @offset: The bitnumber to start searching at
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bitnumber to start searching at
*
* Unrollable variant of find_next_bit() for constant size arrays.
* Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
@@ -335,19 +326,19 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
int ret = -ENOMEM;
/*
- * Nodes preloaded by one cgroup can be be used by another cgroup, so
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
@@ -389,7 +380,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -472,7 +463,7 @@ out:
/**
* radix_tree_shrink - shrink radix tree to minimum height
- * @root radix tree root
+ * @root: radix tree root
*/
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
@@ -688,7 +679,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, bool replace)
+ void __rcu **slot, void *item)
{
if (*slot)
return -EEXIST;
@@ -702,7 +693,7 @@ static inline int insert_entries(struct radix_tree_node *node,
}
/**
- * __radix_tree_insert - insert into a radix tree
+ * radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
* @item: item to insert
@@ -722,7 +713,7 @@ int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
if (error)
return error;
- error = insert_entries(node, slot, item, false);
+ error = insert_entries(node, slot, item);
if (error < 0)
return error;
@@ -930,6 +921,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
/**
* radix_tree_iter_replace - replace item in a slot
* @root: radix tree root
+ * @iter: iterator state
* @slot: pointer to slot
* @item: new item to store in the slot.
*
@@ -1039,7 +1031,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int uninitialized_var(offset);
+ int offset = 0;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
@@ -1144,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- slot++;
iter->index = __radix_tree_iter_add(iter, 1);
iter->next_index = iter->index;
iter->tags = 0;
@@ -1478,7 +1469,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
}
EXPORT_SYMBOL(idr_preload);
@@ -1529,7 +1520,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
offset = radix_tree_find_next_bit(node, IDR_FREE,
offset + 1);
start = next_index(start, node, offset);
- if (start > max)
+ if (start > max || start == 0)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
diff --git a/lib/radix-tree.h b/lib/radix-tree.h
new file mode 100644
index 000000000000..40d5c03e2b09
--- /dev/null
+++ b/lib/radix-tree.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* radix-tree helpers that are only shared with xarray */
+
+struct kmem_cache;
+struct rcu_head;
+
+extern struct kmem_cache *radix_tree_node_cachep;
+extern void radix_tree_node_rcu_free(struct rcu_head *head);
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 3de0d8921286..6be57745afd1 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mktables
altivec*.c
int*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index e723eacf7868..1c5420ff254e 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -2,21 +2,21 @@
obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
- int8.o int16.o int32.o
+ int8.o
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
+raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
-hostprogs-y += mktables
-
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
+hostprogs += mktables
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
ifdef CONFIG_CC_IS_CLANG
# clang ppc port does not yet support -maltivec when -msoft-float is
@@ -26,7 +26,6 @@ CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
CFLAGS_REMOVE_altivec8.o += -msoft-float
-CFLAGS_REMOVE_altivec8.o += -msoft-float
CFLAGS_REMOVE_vpermxor1.o += -msoft-float
CFLAGS_REMOVE_vpermxor2.o += -msoft-float
CFLAGS_REMOVE_vpermxor4.o += -msoft-float
@@ -38,6 +37,8 @@ endif
# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
NEON_FLAGS := -ffreestanding
+# Enable <arm_neon.h>
+NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include)
ifeq ($(ARCH),arm)
NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
endif
@@ -51,111 +52,39 @@ CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
endif
endif
-targets += int1.c
-$(obj)/int1.c: UNROLL := 1
-$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int2.c
-$(obj)/int2.c: UNROLL := 2
-$(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int4.c
-$(obj)/int4.c: UNROLL := 4
-$(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int8.c
-$(obj)/int8.c: UNROLL := 8
-$(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int16.c
-$(obj)/int16.c: UNROLL := 16
-$(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
-targets += int32.c
-$(obj)/int32.c: UNROLL := 32
-$(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE
+targets += int1.c int2.c int4.c int8.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_altivec1.o += $(altivec_flags)
-targets += altivec1.c
-$(obj)/altivec1.c: UNROLL := 1
-$(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec2.o += $(altivec_flags)
-targets += altivec2.c
-$(obj)/altivec2.c: UNROLL := 2
-$(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec4.o += $(altivec_flags)
-targets += altivec4.c
-$(obj)/altivec4.c: UNROLL := 4
-$(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec8.o += $(altivec_flags)
-targets += altivec8.c
-$(obj)/altivec8.c: UNROLL := 8
-$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
+targets += altivec1.c altivec2.c altivec4.c altivec8.c
+$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_vpermxor1.o += $(altivec_flags)
-targets += vpermxor1.c
-$(obj)/vpermxor1.c: UNROLL := 1
-$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor2.o += $(altivec_flags)
-targets += vpermxor2.c
-$(obj)/vpermxor2.c: UNROLL := 2
-$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor4.o += $(altivec_flags)
-targets += vpermxor4.c
-$(obj)/vpermxor4.c: UNROLL := 4
-$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor8.c
-$(obj)/vpermxor8.c: UNROLL := 8
-$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
+$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_neon1.o += $(NEON_FLAGS)
-targets += neon1.c
-$(obj)/neon1.c: UNROLL := 1
-$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon2.o += $(NEON_FLAGS)
-targets += neon2.c
-$(obj)/neon2.c: UNROLL := 2
-$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon4.o += $(NEON_FLAGS)
-targets += neon4.c
-$(obj)/neon4.c: UNROLL := 4
-$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon8.o += $(NEON_FLAGS)
-targets += neon8.c
-$(obj)/neon8.c: UNROLL := 8
-$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+targets += neon1.c neon2.c neon4.c neon8.c
+$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += s390vx8.c
-$(obj)/s390vx8.c: UNROLL := 8
-$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 17417eee0866..cd2e88ee1f14 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -18,12 +18,10 @@
#else
#include <linux/module.h>
#include <linux/gfp.h>
-#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
-#endif
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
@@ -34,10 +32,8 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x2,
&raid6_sse2x1,
&raid6_sse1x2,
@@ -51,11 +47,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x4,
&raid6_sse2x2,
&raid6_sse2x1,
@@ -79,9 +73,13 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_neonx2,
&raid6_neonx1,
#endif
-#if defined(__ia64__)
- &raid6_intx32,
- &raid6_intx16,
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_lsx,
+#endif
#endif
&raid6_intx8,
&raid6_intx4,
@@ -97,13 +95,11 @@ void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
+#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_recov_avx2,
-#endif
-#ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
@@ -112,6 +108,14 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#if defined(CONFIG_KERNEL_MODE_NEON)
&raid6_recov_neon,
#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_recov_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_recov_lsx,
+#endif
+#endif
&raid6_recov_intx1,
NULL
};
@@ -124,6 +128,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#define time_before(x, y) ((x) < (y))
#endif
+#define RAID6_TEST_DISKS 8
+#define RAID6_TEST_DISKS_ORDER 3
+
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
const struct raid6_recov_calls *const *algo;
@@ -146,15 +153,15 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
}
static inline const struct raid6_calls *raid6_choose_gen(
- void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
+ void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
- unsigned long perf, bestgenperf, bestxorperf, j0, j1;
+ unsigned long perf, bestgenperf, j0, j1;
int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
const struct raid6_calls *const *algo;
const struct raid6_calls *best;
- for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
- if (!best || (*algo)->prefer >= best->prefer) {
+ for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
+ if (!best || (*algo)->priority >= best->priority) {
if ((*algo)->valid && !(*algo)->valid())
continue;
@@ -181,44 +188,50 @@ static inline const struct raid6_calls *raid6_choose_gen(
best = *algo;
}
pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ (perf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+ }
+ }
- if (!(*algo)->xor_syndrome)
- continue;
+ if (!best) {
+ pr_err("raid6: Yikes! No algorithm found!\n");
+ goto out;
+ }
- perf = 0;
+ raid6_call = *best;
- preempt_disable();
- j0 = jiffies;
- while ((j1 = jiffies) == j0)
- cpu_relax();
- while (time_before(jiffies,
- j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
- (*algo)->xor_syndrome(disks, start, stop,
- PAGE_SIZE, *dptrs);
- perf++;
- }
- preempt_enable();
-
- if (best == *algo)
- bestxorperf = perf;
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ pr_info("raid6: skipped pq benchmark and selected %s\n",
+ best->name);
+ goto out;
+ }
- pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
+ pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
+ best->name,
+ (bestgenperf * HZ * (disks - 2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+
+ if (best->xor_syndrome) {
+ perf = 0;
+
+ preempt_disable();
+ j0 = jiffies;
+ while ((j1 = jiffies) == j0)
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
+ best->xor_syndrome(disks, start, stop,
+ PAGE_SIZE, *dptrs);
+ perf++;
}
- }
+ preempt_enable();
- if (best) {
- pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
- best->name,
- (bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- if (best->xor_syndrome)
- pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
- (bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
- raid6_call = *best;
- } else
- pr_err("raid6: Yikes! No algorithm found!\n");
+ pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+ (perf * HZ * (disks - 2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+ }
+out:
return best;
}
@@ -228,27 +241,33 @@ static inline const struct raid6_calls *raid6_choose_gen(
int __init raid6_select_algo(void)
{
- const int disks = (65536/PAGE_SIZE)+2;
+ const int disks = RAID6_TEST_DISKS;
const struct raid6_calls *gen_best;
const struct raid6_recov_calls *rec_best;
- char *syndromes;
- void *dptrs[(65536/PAGE_SIZE)+2];
- int i;
-
- for (i = 0; i < disks-2; i++)
- dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
+ char *disk_ptr, *p;
+ void *dptrs[RAID6_TEST_DISKS];
+ int i, cycle;
- /* Normal code - use a 2-page allocation to avoid D$ conflict */
- syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
-
- if (!syndromes) {
+ /* prepare the buffer and fill it circularly with gfmul table */
+ disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
+ if (!disk_ptr) {
pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
- dptrs[disks-2] = syndromes;
- dptrs[disks-1] = syndromes + PAGE_SIZE;
+ p = disk_ptr;
+ for (i = 0; i < disks; i++)
+ dptrs[i] = p + PAGE_SIZE * i;
+
+ cycle = ((disks - 2) * PAGE_SIZE) / 65536;
+ for (i = 0; i < cycle; i++) {
+ memcpy(p, raid6_gfmul, 65536);
+ p += 65536;
+ }
+
+ if ((disks - 2) * PAGE_SIZE % 65536)
+ memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
/* select raid gen_syndrome function */
gen_best = raid6_choose_gen(&dptrs, disks);
@@ -256,7 +275,7 @@ int __init raid6_select_algo(void)
/* select raid recover functions */
rec_best = raid6_choose_recov();
- free_pages((unsigned long)syndromes, 1);
+ free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
return gen_best && rec_best ? 0 : -EINVAL;
}
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 87184b6da28a..059024234dce 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -13,8 +13,6 @@
*
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -134,7 +132,7 @@ const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_xor_syndrome,
raid6_have_avx2,
"avx2x1",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
/*
@@ -264,7 +262,7 @@ const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_xor_syndrome,
raid6_have_avx2,
"avx2x2",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
@@ -467,8 +465,6 @@ const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_xor_syndrome,
raid6_have_avx2,
"avx2x4",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
-#endif
-
-#endif /* CONFIG_AS_AVX2 */
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
index bb684d144ee2..9c3e822e1adf 100644
--- a/lib/raid6/avx512.c
+++ b/lib/raid6/avx512.c
@@ -162,7 +162,7 @@ const struct raid6_calls raid6_avx512x1 = {
raid6_avx5121_xor_syndrome,
raid6_have_avx512,
"avx512x1",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
/*
@@ -319,7 +319,7 @@ const struct raid6_calls raid6_avx512x2 = {
raid6_avx5122_xor_syndrome,
raid6_have_avx512,
"avx512x2",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
@@ -557,7 +557,7 @@ const struct raid6_calls raid6_avx512x4 = {
raid6_avx5124_xor_syndrome,
raid6_have_avx512,
"avx512x4",
- 1 /* Has cache hints */
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
index 558aeac9342a..1ba56c3fa482 100644
--- a/lib/raid6/int.uc
+++ b/lib/raid6/int.uc
@@ -42,13 +42,6 @@ typedef u32 unative_t;
/*
- * IA-64 wants insane amounts of unrolling. On other architectures that
- * is just a waste of space.
- */
-#if ($# <= 8) || defined(__ia64__)
-
-
-/*
* These sub-operations are separate inlines since they can sometimes be
* specially optimized using architecture-specific hacks.
*/
@@ -152,5 +145,3 @@ const struct raid6_calls raid6_intx$# = {
"int" NSTRING "x$#",
0
};
-
-#endif
diff --git a/lib/raid6/loongarch.h b/lib/raid6/loongarch.h
new file mode 100644
index 000000000000..acfc33ce7056
--- /dev/null
+++ b/lib/raid6/loongarch.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * raid6/loongarch.h
+ *
+ * Definitions common to LoongArch RAID-6 code only
+ */
+
+#ifndef _LIB_RAID6_LOONGARCH_H
+#define _LIB_RAID6_LOONGARCH_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+#else /* for user-space testing */
+
+#include <sys/auxv.h>
+
+/* have to supply these defines for glibc 2.37- and musl */
+#ifndef HWCAP_LOONGARCH_LSX
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#endif
+#ifndef HWCAP_LOONGARCH_LASX
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#endif
+
+#define kernel_fpu_begin()
+#define kernel_fpu_end()
+
+#define cpu_has_lsx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX)
+#define cpu_has_lasx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LASX)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LIB_RAID6_LOONGARCH_H */
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
new file mode 100644
index 000000000000..aa5d9f924ca3
--- /dev/null
+++ b/lib/raid6/loongarch_simd.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RAID6 syndrome calculations in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on the generic RAID-6 code (int.uc):
+ *
+ * Copyright 2002-2004 H. Peter Anvin
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * The vector algorithms are currently priority 0, which means the generic
+ * scalar algorithms are not being disabled if vector support is present.
+ * This is like the similar LoongArch RAID5 XOR code, with the main reason
+ * repeated here: it cannot be ruled out at this point in time that some
+ * future (maybe reduced) models could run the vector algorithms slower than
+ * the scalar ones, maybe for errata or micro-op reasons. It may be
+ * appropriate to revisit this after one or two more uarch generations.
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+#define NSIZE 16
+
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
+ asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
+ asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
+ asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
+ asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr12");
+ asm volatile("vxor.v $vr5, $vr17, $vr13");
+ asm volatile("vxor.v $vr6, $vr18, $vr14");
+ asm volatile("vxor.v $vr7, $vr19, $vr15");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "vld $vr20, %0\n\t"
+ "vld $vr21, %1\n\t"
+ "vld $vr22, %2\n\t"
+ "vld $vr23, %3\n\t"
+ "vld $vr24, %4\n\t"
+ "vld $vr25, %5\n\t"
+ "vld $vr26, %6\n\t"
+ "vld $vr27, %7\n\t"
+ "vxor.v $vr20, $vr20, $vr0\n\t"
+ "vxor.v $vr21, $vr21, $vr1\n\t"
+ "vxor.v $vr22, $vr22, $vr2\n\t"
+ "vxor.v $vr23, $vr23, $vr3\n\t"
+ "vxor.v $vr24, $vr24, $vr4\n\t"
+ "vxor.v $vr25, $vr25, $vr5\n\t"
+ "vxor.v $vr26, $vr26, $vr6\n\t"
+ "vxor.v $vr27, $vr27, $vr7\n\t"
+ "vst $vr20, %0\n\t"
+ "vst $vr21, %1\n\t"
+ "vst $vr22, %2\n\t"
+ "vst $vr23, %3\n\t"
+ "vst $vr24, %4\n\t"
+ "vst $vr25, %5\n\t"
+ "vst $vr26, %6\n\t"
+ "vst $vr27, %7\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lsx = {
+ raid6_lsx_gen_syndrome,
+ raid6_lsx_xor_syndrome,
+ raid6_has_lsx,
+ "lsx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+#define NSIZE 32
+
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr6");
+ asm volatile("xvxor.v $xr3, $xr9, $xr7");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "xvld $xr10, %0\n\t"
+ "xvld $xr11, %1\n\t"
+ "xvld $xr12, %2\n\t"
+ "xvld $xr13, %3\n\t"
+ "xvxor.v $xr10, $xr10, $xr0\n\t"
+ "xvxor.v $xr11, $xr11, $xr1\n\t"
+ "xvxor.v $xr12, $xr12, $xr2\n\t"
+ "xvxor.v $xr13, $xr13, $xr3\n\t"
+ "xvst $xr10, %0\n\t"
+ "xvst $xr11, %1\n\t"
+ "xvst $xr12, %2\n\t"
+ "xvst $xr13, %3\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lasx = {
+ raid6_lasx_gen_syndrome,
+ raid6_lasx_xor_syndrome,
+ raid6_has_lasx,
+ "lasx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 9c485df1308f..3be03793237c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -56,8 +56,10 @@ int main(int argc, char *argv[])
uint8_t v;
uint8_t exptbl[256], invtbl[256];
- printf("#include <linux/raid/pq.h>\n");
+ printf("#ifdef __KERNEL__\n");
printf("#include <linux/export.h>\n");
+ printf("#endif\n");
+ printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
diff --git a/lib/raid6/neon.h b/lib/raid6/neon.h
new file mode 100644
index 000000000000..2ca41ee9b499
--- /dev/null
+++ b/lib/raid6/neon.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon1_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon2_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon4_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
+ uint8_t *dq, const uint8_t *pbmul,
+ const uint8_t *qmul);
+
+void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
+ const uint8_t *qmul);
+
+
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index b7c68030da4f..355270af0cd6 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -25,6 +25,7 @@
*/
#include <arm_neon.h>
+#include "neon.h"
typedef uint8x16_t unative_t;
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index e49d519de6cb..a7c1b2bbe40d 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -13,7 +13,6 @@
* the syndrome.)
*/
-#include <linux/export.h>
#include <linux/raid/pq.h>
/* Recover two failed data blocks. */
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index 7a3b5e7f66ee..4e8095403ee2 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -4,8 +4,6 @@
* Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -313,7 +311,3 @@ const struct raid6_recov_calls raid6_recov_avx2 = {
#endif
.priority = 2,
};
-
-#else
-#warning "your version of binutils lacks AVX2 support"
-#endif
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid6/recov_loongarch_simd.c
new file mode 100644
index 000000000000..94aeac85e6f7
--- /dev/null
+++ b/lib/raid6/recov_loongarch_simd.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RAID6 recovery algorithms in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Originally based on recov_avx2.c and recov_ssse3.c:
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * Unlike with the syndrome calculation algorithms, there's no boot-time
+ * selection of recovery algorithms by benchmarking, so we have to specify
+ * the priorities and hope that future cores will all have decent vector
+ * support (i.e. LASX is never slower than LSX, let alone the scalar code).
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * vr20, vr21: qmul
+ * vr22, vr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+
+ while (bytes) {
+ /* vr4 - vr7: Q */
+ asm volatile("vld $vr4, %0" : : "m" (q[0]));
+ asm volatile("vld $vr5, %0" : : "m" (q[16]));
+ asm volatile("vld $vr6, %0" : : "m" (q[32]));
+ asm volatile("vld $vr7, %0" : : "m" (q[48]));
+ /* vr4 - vr7: Q + Qxy */
+ asm volatile("vld $vr8, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dq[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ /* vr0 - vr3: P */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr0 - vr3: P + Pxy */
+ asm volatile("vld $vr8, %0" : : "m" (dp[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dp[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dp[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dp[48]));
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr20, $vr20, $vr4");
+ asm volatile("vshuf.b $vr5, $vr20, $vr20, $vr5");
+ asm volatile("vshuf.b $vr6, $vr20, $vr20, $vr6");
+ asm volatile("vshuf.b $vr7, $vr20, $vr20, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr21, $vr21, $vr8");
+ asm volatile("vshuf.b $vr9, $vr21, $vr21, $vr9");
+ asm volatile("vshuf.b $vr10, $vr21, $vr21, $vr10");
+ asm volatile("vshuf.b $vr11, $vr21, $vr21, $vr11");
+ /* vr16 - vr19: B(Q + Qxy) */
+ asm volatile("vxor.v $vr16, $vr8, $vr4");
+ asm volatile("vxor.v $vr17, $vr9, $vr5");
+ asm volatile("vxor.v $vr18, $vr10, $vr6");
+ asm volatile("vxor.v $vr19, $vr11, $vr7");
+
+ /* vr4 - vr7: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("vsrli.b $vr4, $vr0, 4");
+ asm volatile("vsrli.b $vr5, $vr1, 4");
+ asm volatile("vsrli.b $vr6, $vr2, 4");
+ asm volatile("vsrli.b $vr7, $vr3, 4");
+ /* vr12 - vr15: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("vandi.b $vr12, $vr0, 0x0f");
+ asm volatile("vandi.b $vr13, $vr1, 0x0f");
+ asm volatile("vandi.b $vr14, $vr2, 0x0f");
+ asm volatile("vandi.b $vr15, $vr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("vshuf.b $vr12, $vr22, $vr22, $vr12");
+ asm volatile("vshuf.b $vr13, $vr22, $vr22, $vr13");
+ asm volatile("vshuf.b $vr14, $vr22, $vr22, $vr14");
+ asm volatile("vshuf.b $vr15, $vr22, $vr22, $vr15");
+ /* lookup from pbmul[16] */
+ asm volatile("vshuf.b $vr4, $vr23, $vr23, $vr4");
+ asm volatile("vshuf.b $vr5, $vr23, $vr23, $vr5");
+ asm volatile("vshuf.b $vr6, $vr23, $vr23, $vr6");
+ asm volatile("vshuf.b $vr7, $vr23, $vr23, $vr7");
+ /* vr4 - vr7: A(P + Pxy) */
+ asm volatile("vxor.v $vr4, $vr4, $vr12");
+ asm volatile("vxor.v $vr5, $vr5, $vr13");
+ asm volatile("vxor.v $vr6, $vr6, $vr14");
+ asm volatile("vxor.v $vr7, $vr7, $vr15");
+
+ /* vr4 - vr7: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr16");
+ asm volatile("vxor.v $vr5, $vr5, $vr17");
+ asm volatile("vxor.v $vr6, $vr6, $vr18");
+ asm volatile("vxor.v $vr7, $vr7, $vr19");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Pxy + Dx = Dy */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (dp[0]));
+ asm volatile("vst $vr1, %0" : "=m" (dp[16]));
+ asm volatile("vst $vr2, %0" : "=m" (dp[32]));
+ asm volatile("vst $vr3, %0" : "=m" (dp[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* vr22, vr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+
+ while (bytes) {
+ /* vr0 - vr3: P + Dx */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr4 - vr7: Qx */
+ asm volatile("vld $vr4, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr5, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr6, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr7, %0" : : "m" (dq[48]));
+ /* vr4 - vr7: Q + Qx */
+ asm volatile("vld $vr8, %0" : : "m" (q[0]));
+ asm volatile("vld $vr9, %0" : : "m" (q[16]));
+ asm volatile("vld $vr10, %0" : : "m" (q[32]));
+ asm volatile("vld $vr11, %0" : : "m" (q[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr22, $vr22, $vr4");
+ asm volatile("vshuf.b $vr5, $vr22, $vr22, $vr5");
+ asm volatile("vshuf.b $vr6, $vr22, $vr22, $vr6");
+ asm volatile("vshuf.b $vr7, $vr22, $vr22, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr23, $vr23, $vr8");
+ asm volatile("vshuf.b $vr9, $vr23, $vr23, $vr9");
+ asm volatile("vshuf.b $vr10, $vr23, $vr23, $vr10");
+ asm volatile("vshuf.b $vr11, $vr23, $vr23, $vr11");
+ /* vr4 - vr7: qmul(Q + Qx) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Dx + Dx = P */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (p[0]));
+ asm volatile("vst $vr1, %0" : "=m" (p[16]));
+ asm volatile("vst $vr2, %0" : "=m" (p[32]));
+ asm volatile("vst $vr3, %0" : "=m" (p[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lsx = {
+ .data2 = raid6_2data_recov_lsx,
+ .datap = raid6_datap_recov_lsx,
+ .valid = raid6_has_lsx,
+ .name = "lsx",
+ .priority = 1,
+};
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * xr20, xr21: qmul
+ * xr22, xr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+ asm volatile("xvreplve0.q $xr20, $xr20");
+ asm volatile("xvreplve0.q $xr21, $xr21");
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: Q */
+ asm volatile("xvld $xr0, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (q[32]));
+ /* xr0, xr1: Q + Qxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dq[32]));
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* xr2, xr3: P */
+ asm volatile("xvld $xr2, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (p[32]));
+ /* xr2, xr3: P + Pxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dp[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dp[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvsrli.b $xr4, $xr0, 4");
+ asm volatile("xvsrli.b $xr5, $xr1, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvandi.b $xr0, $xr0, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr1, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr20, $xr20, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr20, $xr20, $xr1");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr21, $xr21, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr21, $xr21, $xr5");
+ /* xr6, xr7: B(Q + Qxy) */
+ asm volatile("xvxor.v $xr6, $xr4, $xr0");
+ asm volatile("xvxor.v $xr7, $xr5, $xr1");
+
+ /* xr4, xr5: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvandi.b $xr0, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr22, $xr22, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr22, $xr22, $xr1");
+ /* lookup from pbmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr0, xr1: A(P + Pxy) */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+
+ /* xr0, xr1: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("xvxor.v $xr0, $xr0, $xr6");
+ asm volatile("xvxor.v $xr1, $xr1, $xr7");
+
+ /* xr2, xr3: P + Pxy + Dx = Dy */
+ asm volatile("xvxor.v $xr2, $xr2, $xr0");
+ asm volatile("xvxor.v $xr3, $xr3, $xr1");
+
+ asm volatile("xvst $xr0, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr2, %0" : "=m" (dp[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* xr22, xr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: P + Dx */
+ asm volatile("xvld $xr0, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (p[32]));
+ /* xr2, xr3: Qx */
+ asm volatile("xvld $xr2, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (dq[32]));
+ /* xr2, xr3: Q + Qx */
+ asm volatile("xvld $xr4, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (q[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr2, xr3: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvandi.b $xr2, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr3, $xr3, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr2, $xr22, $xr22, $xr2");
+ asm volatile("xvshuf.b $xr3, $xr22, $xr22, $xr3");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr2, xr3: qmul(Q + Qx) = Dx */
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr0, xr1: P + Dx + Dx = P */
+ asm volatile("xvxor.v $xr0, $xr0, $xr2");
+ asm volatile("xvxor.v $xr1, $xr1, $xr3");
+
+ asm volatile("xvst $xr2, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr0, %0" : "=m" (p[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lasx = {
+ .data2 = raid6_2data_recov_lasx,
+ .datap = raid6_datap_recov_lasx,
+ .valid = raid6_has_lasx,
+ .name = "lasx",
+ .priority = 2,
+};
+#endif /* CONFIG_CPU_HAS_LASX */
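The recovery loops above perform GF(2^8) multiplication by a constant using two 16-entry lookup tables: each byte is split into its low and high nibble (vandi.b 0x0f / vsrli.b 4), both nibbles are looked up with vshuf.b in qmul[0]/qmul[16] (or pbmul[0]/pbmul[16]), and the results are XORed. A scalar C sketch of that lookup, with hypothetical names, for illustration:

#include <stdint.h>

/*
 * tbl_lo[n] holds C * n and tbl_hi[n] holds C * (n << 4) in GF(2^8),
 * so C * x == tbl_lo[x & 0x0f] ^ tbl_hi[x >> 4].
 */
static uint8_t gf256_mul_by_table(const uint8_t tbl_lo[16],
				  const uint8_t tbl_hi[16], uint8_t x)
{
	return tbl_lo[x & 0x0f] ^ tbl_hi[x >> 4];
}

This is the same nibble split that raid6_vgfmul provides as a 32-byte table per constant, which is why qmul and pbmul are loaded as two consecutive 16-byte halves.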
diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c
index d6fba8bf8c0a..1bfc14174d4d 100644
--- a/lib/raid6/recov_neon.c
+++ b/lib/raid6/recov_neon.c
@@ -8,6 +8,7 @@
#ifdef __KERNEL__
#include <asm/neon.h>
+#include "neon.h"
#else
#define kernel_neon_begin()
#define kernel_neon_end()
@@ -19,13 +20,6 @@ static int raid6_has_neon(void)
return cpu_has_neon();
}
-void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
- uint8_t *dq, const uint8_t *pbmul,
- const uint8_t *qmul);
-
-void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
- const uint8_t *qmul);
-
static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
index 90eb80d43790..f9e7e8f5a151 100644
--- a/lib/raid6/recov_neon_inner.c
+++ b/lib/raid6/recov_neon_inner.c
@@ -5,6 +5,7 @@
*/
#include <arm_neon.h>
+#include "neon.h"
#ifdef CONFIG_ARM
/*
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 1de97d2405d0..4bfa3c6b60de 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -3,8 +3,6 @@
* Copyright (C) 2012 Intel Corporation
*/
-#ifdef CONFIG_AS_SSSE3
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -328,7 +326,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
#endif
.priority = 1,
};
-
-#else
-#warning "your version of binutils lacks SSSE3 support"
-#endif
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 914ebe98fc21..cd0b9e95f499 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -13,8 +13,7 @@
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
-
-asm(".include \"asm/vx-insn.h\"\n");
+#include <asm/vx-insn.h>
#define NSIZE 16
@@ -60,7 +59,7 @@ static inline void LOAD_DATA(int x, u8 *ptr)
typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
- asm volatile ("VLM %2,%3,0,%r1"
+ asm volatile ("VLM %2,%3,0,%1"
: : "m" (*__ptr), "a" (__ptr), "i" (x),
"i" (x + $# - 1));
}
@@ -159,7 +158,7 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
static int raid6_s390vx$#_valid(void)
{
- return MACHINE_HAS_VX;
+ return cpu_has_vx();
}
const struct raid6_calls raid6_s390vx$# = {
diff --git a/lib/raid6/test/.gitignore b/lib/raid6/test/.gitignore
new file mode 100644
index 000000000000..1b68a77f348f
--- /dev/null
+++ b/lib/raid6/test/.gitignore
@@ -0,0 +1,3 @@
+/int.uc
+/neon.uc
+/raid6test
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 3ab8720aa2f8..2abe0076a636 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -4,14 +4,17 @@
# from userspace.
#
-CC = gcc
-OPTFLAGS = -O2 # Adjust as desired
-CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
-LD = ld
-AWK = awk -f
-AR = ar
-RANLIB = ranlib
-OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
+pound := \#
+
+# Adjust as desired
+CC = gcc
+OPTFLAGS = -O2
+CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
+LD = ld
+AWK = awk -f
+AR = ar
+RANLIB = ranlib
+OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
ARCH := $(shell uname -m 2>/dev/null | sed -e /s/i.86/i386/)
ifeq ($(ARCH),i386)
@@ -32,29 +35,37 @@ ifeq ($(ARCH),aarch64)
HAS_NEON = yes
endif
+ifeq ($(findstring ppc,$(ARCH)),ppc)
+ CFLAGS += -I../../../arch/powerpc/include
+ HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
+ gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
+endif
+
+ifeq ($(ARCH),loongarch64)
+ CFLAGS += -I../../../arch/loongarch/include -DCONFIG_LOONGARCH=1
+ CFLAGS += $(shell echo 'vld $$vr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LSX=1)
+ CFLAGS += $(shell echo 'xvld $$xr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LASX=1)
+endif
+
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
- CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
- CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_AVX2=1)
- CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_AVX512=1)
+ CFLAGS += -DCONFIG_X86
+ CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
-else
- HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
- gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
- ifeq ($(HAS_ALTIVEC),yes)
- CFLAGS += -I../../../arch/powerpc/include
- CFLAGS += -DCONFIG_ALTIVEC
- OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
- vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
- endif
+else ifeq ($(HAS_ALTIVEC),yes)
+ CFLAGS += -DCONFIG_ALTIVEC
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+else ifeq ($(ARCH),loongarch64)
+ OBJS += loongarch_simd.o recov_loongarch_simd.o
endif
.c.o:
@@ -66,12 +77,12 @@ endif
%.uc: ../%.uc
cp -f $< $@
-all: raid6.a raid6test
+all: raid6.a raid6test
raid6.a: $(OBJS)
- rm -f $@
- $(AR) cq $@ $^
- $(RANLIB) $@
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
raid6test: test.c raid6.a
$(CC) $(CFLAGS) -o raid6test $^
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index a3cf071941ab..841a55242aba 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -19,7 +19,6 @@
#define NDISKS 16 /* Including P and Q */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
-struct raid6_calls raid6_call;
char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
for (i = 0; i < rep; ++i) {
tmp = $0
gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
+ gsub(/\$#/, n, tmp)
gsub(/\$\*/, "$", tmp)
print tmp
}
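For reference, unroll.awk replaces $$ with the unroll index and $# with the unroll factor, so one template line expands into $# statements. As an illustrative example (not part of the patch), with an unroll factor of 2 the template line

	wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];

expands to:

	wq0 = wp0 = *(unative_t *)&dptr[z0][d+0*NSIZE];
	wq1 = wp1 = *(unative_t *)&dptr[z0][d+1*NSIZE];

which matches the two per-lane loads visible in the hand-written LSX/LASX code earlier in this series.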
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
index 10475dc423c1..1bfb127fbfe8 100644
--- a/lib/raid6/vpermxor.uc
+++ b/lib/raid6/vpermxor.uc
@@ -24,9 +24,9 @@
#ifdef CONFIG_ALTIVEC
#include <altivec.h>
+#include <asm/ppc-opcode.h>
#ifdef __KERNEL__
#include <asm/cputable.h>
-#include <asm/ppc-opcode.h>
#include <asm/switch_to.h>
#endif
diff --git a/lib/random32.c b/lib/random32.c
index 763b920a6206..32060b852668 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -38,24 +38,16 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
- * For more random results, use prandom_u32().
+ * For more random results, use get_random_u32().
*/
u32 prandom_u32_state(struct rnd_state *state)
{
@@ -70,25 +62,6 @@ u32 prandom_u32_state(struct rnd_state *state)
EXPORT_SYMBOL(prandom_u32_state);
/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
- u32 res;
-
- res = prandom_u32_state(state);
- put_cpu_var(net_rand_state);
-
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -96,7 +69,7 @@ EXPORT_SYMBOL(prandom_u32);
* @bytes: the requested number of bytes
*
* This is used for pseudo-randomness with no outside seeding.
- * For more random results, use prandom_bytes().
+ * For more random results, use get_random_bytes().
*/
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
@@ -119,20 +92,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
}
EXPORT_SYMBOL(prandom_bytes_state);
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-
- prandom_bytes_state(state, buf, bytes);
- put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
@@ -148,96 +107,6 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
-{
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional entropy to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
- /*
- * No locking on the CPUs, but then somewhat random results are, well,
- * expected.
- */
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-
- state->s1 = __seed(state->s1 ^ entropy, 2U);
- prandom_warmup(state);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
- int i;
-
- prandom_state_selftest();
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
- prandom_seed_early(state, weak_seed, true);
- prandom_warmup(state);
- }
-
- return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer);
-
-static void __prandom_timer(struct timer_list *unused)
-{
- u32 entropy;
- unsigned long expires;
-
- get_random_bytes(&entropy, sizeof(entropy));
- prandom_seed(entropy);
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + prandom_u32_max(40);
- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
- add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
-}
-
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
@@ -257,51 +126,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
EXPORT_SYMBOL(prandom_seed_full_state);
-/*
- * Generate better values after random number generator
- * is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
- unsigned long flags;
- static bool latch = false;
- static DEFINE_SPINLOCK(lock);
-
- /* Asking for random bytes might result in bytes getting
- * moved into the nonblocking pool and thus marking it
- * as initialized. In this case we would double back into
- * this function and attempt to do a late reseed.
- * Ignore the pointless attempt to reseed again if we're
- * already waiting for bytes when the nonblocking pool
- * got initialized.
- */
-
- /* only allow initial seeding (late == false) once */
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (latch && !late)
- goto out;
-
- latch = true;
- prandom_seed_full_state(&net_rand_state);
-out:
- spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
- __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
- __prandom_reseed(false);
- __prandom_start_seed_timer();
- return 0;
-}
-late_initcall(prandom_reseed);
-
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
@@ -421,7 +245,16 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static void __init prandom_state_selftest(void)
+static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+ state->s1 = __seed(LCG(seed), 2U);
+ state->s2 = __seed(LCG(state->s1), 8U);
+ state->s3 = __seed(LCG(state->s2), 16U);
+ state->s4 = __seed(LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
@@ -429,7 +262,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test1[i].seed, false);
+ prandom_state_selftest_seed(&state, test1[i].seed);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -444,7 +277,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test2[i].seed, false);
+ prandom_state_selftest_seed(&state, test2[i].seed);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
@@ -461,5 +294,7 @@ static void __init prandom_state_selftest(void)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
}
+core_initcall(prandom_state_selftest);
#endif
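With the global helpers gone, callers that need a reproducible (non-cryptographic) sequence keep using the per-state API that remains here. A minimal sketch, assuming the prandom_seed_state() and prandom_u32_state() helpers from <linux/prandom.h>:

#include <linux/prandom.h>

/* Same seed => same sequence; useful for tests and simulations only. */
static u32 example_reproducible_u32(u64 seed)
{
	struct rnd_state state;

	prandom_seed_state(&state, seed);
	return prandom_u32_state(&state);
}

For anything security-sensitive, get_random_u32()/get_random_bytes() remain the right interfaces, as the updated kerneldoc above says.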
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index e01a93f46f83..ce945c17980b 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -26,10 +26,16 @@
*/
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
+ /* Paired with WRITE_ONCE() in .proc_handler().
+ * Changing two values seperately could be inconsistent
+ * and some message could be lost. (See: net_ratelimit_state).
+ */
+ int interval = READ_ONCE(rs->interval);
+ int burst = READ_ONCE(rs->burst);
unsigned long flags;
int ret;
- if (!rs->interval)
+ if (!interval)
return 1;
/*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (!rs->begin)
rs->begin = jiffies;
- if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (time_is_before_jiffies(rs->begin + interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->begin = jiffies;
rs->printed = 0;
}
- if (rs->burst && rs->burst > rs->printed) {
+ if (burst && burst > rs->printed) {
rs->printed++;
ret = 1;
} else {
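The READ_ONCE() above pairs with WRITE_ONCE() updates of the same fields in the sysctl handler, so each ___ratelimit() call works on one consistent interval/burst snapshot. Caller-side usage is unchanged; a minimal sketch, assuming the DEFINE_RATELIMIT_STATE() and __ratelimit() helpers from <linux/ratelimit.h>:

#include <linux/ratelimit.h>
#include <linux/printk.h>

/* Allow at most 10 messages per 5 seconds; the rest are suppressed. */
static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 10);

static void example_report(int err)
{
	if (__ratelimit(&example_rs))
		pr_warn("example: transient error %d\n", err);
}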
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1ef6e25d031c..5114eda6309c 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
/*
- * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+ * red-black trees properties: https://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
@@ -58,7 +58,7 @@
static inline void rb_set_black(struct rb_node *rb)
{
- rb->__rb_parent_color |= RB_BLACK;
+ rb->__rb_parent_color += RB_BLACK;
}
static inline struct rb_node *rb_red_parent(struct rb_node *red)
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
- if (newleft)
- *leftmost = node;
-
while (true) {
/*
* Loop invariant: node is red.
@@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, false, NULL, dummy_rotate);
+ __rb_insert(node, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root,
- NULL, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);
-void rb_insert_color_cached(struct rb_node *node,
- struct rb_root_cached *root, bool leftmost)
-{
- __rb_insert(node, &root->rb_root, leftmost,
- &root->rb_leftmost, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_insert_color_cached);
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_erase_cached);
-
/*
* Augmented rbtree manipulation functions.
*
@@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached);
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, newleft, leftmost, augment_rotate);
+ __rb_insert(node, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);
@@ -527,7 +503,7 @@ struct rb_node *rb_next(const struct rb_node *node)
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
- node=node->rb_left;
+ node = node->rb_left;
return (struct rb_node *)node;
}
@@ -559,7 +535,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
- node=node->rb_right;
+ node = node->rb_right;
return (struct rb_node *)node;
}
@@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
}
EXPORT_SYMBOL(rb_replace_node);
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root)
-{
- rb_replace_node(victim, new, &root->rb_root);
-
- if (root->rb_leftmost == victim)
- root->rb_leftmost = new;
-}
-EXPORT_SYMBOL(rb_replace_node_cached);
-
void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 62b8ee92643d..41ae3c7570d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -77,26 +77,10 @@ static inline void erase_cached(struct test_node *node, struct rb_root_cached *r
}
-static inline u32 augment_recompute(struct test_node *node)
-{
- u32 max = node->val, child_augmented;
- if (node->rb.rb_left) {
- child_augmented = rb_entry(node->rb.rb_left, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- if (node->rb.rb_right) {
- child_augmented = rb_entry(node->rb.rb_right, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- return max;
-}
+#define NODE_VAL(node) ((node)->val)
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
- u32, augmented, augment_recompute)
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct test_node, rb, u32, augmented, NODE_VAL)
static void insert_augmented(struct test_node *node,
struct rb_root_cached *root)
@@ -238,7 +222,20 @@ static void check_augmented(int nr_nodes)
check(nr_nodes);
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
struct test_node *node = rb_entry(rb, struct test_node, rb);
- WARN_ON_ONCE(node->augmented != augment_recompute(node));
+ u32 subtree, max = node->val;
+ if (node->rb.rb_left) {
+ subtree = rb_entry(node->rb.rb_left, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ if (node->rb.rb_right) {
+ subtree = rb_entry(node->rb.rb_right, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ WARN_ON_ONCE(node->augmented != max);
}
}
diff --git a/lib/rcuref.c b/lib/rcuref.c
new file mode 100644
index 000000000000..97f300eca927
--- /dev/null
+++ b/lib/rcuref.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * rcuref - A scalable reference count implementation for RCU managed objects
+ *
+ * rcuref is provided to replace open coded reference count implementations
+ * based on atomic_t. It explicitly protects RCU-managed objects which can
+ * be visible even after the last reference has been dropped and the object
+ * is heading towards destruction.
+ *
+ * A common usage pattern is:
+ *
+ * get()
+ * rcu_read_lock();
+ * p = get_ptr();
+ * if (p && !atomic_inc_not_zero(&p->refcnt))
+ * p = NULL;
+ * rcu_read_unlock();
+ * return p;
+ *
+ * put()
+ *	if (!atomic_dec_return(&p->refcnt)) {
+ * remove_ptr(p);
+ *		kfree_rcu(p, rcu);
+ * }
+ *
+ * atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has
+ * O(N^2) behaviour under contention with N concurrent operations.
+ *
+ * rcuref uses atomic_add_negative_relaxed() for the fast path, which scales
+ * better under contention.
+ *
+ * Why not refcount?
+ * =================
+ *
+ * In principle it should be possible to make refcount use the rcuref
+ * scheme, but the destruction race described below cannot be prevented
+ * unless the protected object is RCU managed.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * rcuref uses an unsigned integer reference counter. As long as the
+ * counter value is greater than or equal to RCUREF_ONEREF and not larger
+ * than RCUREF_MAXREF the reference is alive:
+ *
+ * ONEREF MAXREF SATURATED RELEASED DEAD NOREF
+ * 0 0x7FFFFFFF 0x80000000 0xA0000000 0xBFFFFFFF 0xC0000000 0xE0000000 0xFFFFFFFF
+ * <---valid --------> <-------saturation zone-------> <-----dead zone----->
+ *
+ * The get() and put() operations do unconditional increments and
+ * decrements. The result is checked after the operation. This optimizes
+ * for the fast path.
+ *
+ * If the reference count is saturated or dead, then the increments and
+ * decrements are not harmful as the reference count still stays in the
+ * respective zones and is always set back to SATURATED resp. DEAD. The
+ * zones have room for 2^28 racing operations in each direction, which
+ * makes it practically impossible to escape the zones.
+ *
+ * Once the last reference is dropped the reference count becomes
+ * RCUREF_NOREF which forces rcuref_put() into the slowpath operation. The
+ * slowpath then tries to set the reference count from RCUREF_NOREF to
+ * RCUREF_DEAD via a cmpxchg(). This opens a small window where a
+ * concurrent rcuref_get() can acquire the reference count and bring it
+ * back to RCUREF_ONEREF or even drop the reference again and mark it DEAD.
+ *
+ * If the cmpxchg() succeeds then a concurrent rcuref_get() will result in
+ * DEAD + 1, which is inside the dead zone. If that happens the reference
+ * count is put back to DEAD.
+ *
+ * The actual race is possible due to the unconditional increment and
+ * decrements in rcuref_get() and rcuref_put():
+ *
+ * T1 T2
+ * get() put()
+ * if (atomic_add_negative(-1, &ref->refcnt))
+ * succeeds-> atomic_cmpxchg(&ref->refcnt, NOREF, DEAD);
+ *
+ * atomic_add_negative(1, &ref->refcnt); <- Elevates refcount to DEAD + 1
+ *
+ * As the result of T1's add is negative, the get() goes into the slow path
+ * and observes refcnt being in the dead zone which makes the operation fail.
+ *
+ * Possible critical states:
+ *
+ * Context Counter References Operation
+ * T1 0 1 init()
+ * T2 1 2 get()
+ * T1 0 1 put()
+ * T2 -1 0 put() tries to mark dead
+ * T1 0 1 get()
+ * T2 0 1 put() mark dead fails
+ * T1 -1 0 put() tries to mark dead
+ * T1 DEAD 0 put() mark dead succeeds
+ * T2 DEAD+1 0 get() fails and puts it back to DEAD
+ *
+ * Of course there are more complex scenarios, but the above illustrates
+ * the working principle. The rest is left to the imagination of the
+ * reader.
+ *
+ * Deconstruction race
+ * ===================
+ *
+ * The release operation must be protected by prohibiting a grace period in
+ * order to prevent a possible use after free:
+ *
+ * T1 T2
+ * put() get()
+ * // ref->refcnt = ONEREF
+ * if (!atomic_add_negative(-1, &ref->refcnt))
+ * return false; <- Not taken
+ *
+ * // ref->refcnt == NOREF
+ * --> preemption
+ * // Elevates ref->refcnt to ONEREF
+ * if (!atomic_add_negative(1, &ref->refcnt))
+ * return true; <- taken
+ *
+ * if (put(&p->ref)) { <-- Succeeds
+ * remove_pointer(p);
+ * kfree_rcu(p, rcu);
+ * }
+ *
+ * RCU grace period ends, object is freed
+ *
+ * atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); <- UAF
+ *
+ * This is prevented by disabling preemption around the put() operation as
+ * that's in most kernel configurations cheaper than an rcu_read_lock() /
+ * rcu_read_unlock() pair and in many cases even a NOOP. In any case it
+ * prevents the grace period from completing, which keeps the object alive
+ * until all put() operations complete.
+ *
+ * Saturation protection
+ * =====================
+ *
+ * The reference count has a saturation limit RCUREF_MAXREF (INT_MAX).
+ * Once this is exceeded the reference count becomes stale by setting it
+ * to RCUREF_SATURATED, which will cause a memory leak, but it prevents
+ * wraparounds which obviously cause worse problems than a memory
+ * leak. When saturation is reached a warning is emitted.
+ *
+ * Race conditions
+ * ===============
+ *
+ * All reference count increment/decrement operations are unconditional and
+ * only verified after the fact. This optimizes for the good case and takes
+ * the occasional race vs. a dead or already saturated refcount into
+ * account. The saturation and dead zones are large enough to accommodate
+ * for that.
+ *
+ * Memory ordering
+ * ===============
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object to increase the
+ * reference count on will provide the ordering. For locked data
+ * structures, it's the lock acquire; for RCU/lockless data structures it's
+ * the dependent load.
+ *
+ * rcuref_get() provides a control dependency ordering future stores which
+ * ensures that the object is not modified when acquiring a reference
+ * fails.
+ *
+ * rcuref_put() provides release order, i.e. all prior loads and stores
+ * will be issued before. It also provides a control dependency ordering
+ * against the subsequent destruction of the object.
+ *
+ * If rcuref_put() successfully dropped the last reference and marked the
+ * object DEAD it also provides acquire ordering.
+ */
+
+#include <linux/export.h>
+#include <linux/rcuref.h>
+
+/**
+ * rcuref_get_slowpath - Slowpath of rcuref_get()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * False if the reference count was already marked dead
+ *
+ * True if the reference count is saturated, which prevents the
+ *	object from ever being deconstructed.
+ */
+bool rcuref_get_slowpath(rcuref_t *ref)
+{
+ unsigned int cnt = atomic_read(&ref->refcnt);
+
+ /*
+ * If the reference count was already marked dead, undo the
+ * increment so it stays in the middle of the dead zone and return
+ * fail.
+ */
+ if (cnt >= RCUREF_RELEASED) {
+ atomic_set(&ref->refcnt, RCUREF_DEAD);
+ return false;
+ }
+
+ /*
+ * If it was saturated, warn and mark it so. In case the increment
+ * was already on a saturated value restore the saturation
+ * marker. This keeps it in the middle of the saturation zone and
+ * prevents the reference count from overflowing. This leaks the
+ * object memory, but prevents the obvious reference count overflow
+ * damage.
+ */
+ if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory"))
+ atomic_set(&ref->refcnt, RCUREF_SATURATED);
+ return true;
+}
+EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+
+/**
+ * rcuref_put_slowpath - Slowpath of __rcuref_put()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+bool rcuref_put_slowpath(rcuref_t *ref)
+{
+ unsigned int cnt = atomic_read(&ref->refcnt);
+
+ /* Did this drop the last reference? */
+ if (likely(cnt == RCUREF_NOREF)) {
+ /*
+ * Carefully try to set the reference count to RCUREF_DEAD.
+ *
+ * This can fail if a concurrent get() operation has
+ * elevated it again or the corresponding put() even marked
+ * it dead already. Both are valid situations and do not
+ * require a retry. If this fails the caller is not
+ * allowed to deconstruct the object.
+ */
+ if (!atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD))
+ return false;
+
+ /*
+ * The caller can safely schedule the object for
+ * deconstruction. Provide acquire ordering.
+ */
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+ if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) {
+ atomic_set(&ref->refcnt, RCUREF_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > RCUREF_MAXREF)
+ atomic_set(&ref->refcnt, RCUREF_SATURATED);
+ return false;
+}
+EXPORT_SYMBOL_GPL(rcuref_put_slowpath);
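Putting the pattern from the header comment together, a minimal sketch of an RCU-managed object converted to rcuref; the structure and helper names are hypothetical, only rcuref_init()/rcuref_get()/rcuref_put() from <linux/rcuref.h> are the real interfaces:

#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	rcuref_t	refcnt;
	struct rcu_head	rcu;
};

static struct example_obj __rcu *example_ptr;

static struct example_obj *example_alloc(void)
{
	struct example_obj *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		rcuref_init(&p->refcnt, 1);	/* start with one reference */
	return p;
}

static struct example_obj *example_get(void)
{
	struct example_obj *p;

	rcu_read_lock();
	p = rcu_dereference(example_ptr);
	if (p && !rcuref_get(&p->refcnt))	/* fails only when already dead */
		p = NULL;
	rcu_read_unlock();
	return p;
}

static void example_put(struct example_obj *p)
{
	/*
	 * Returns true only for the final reference; the object must already
	 * be unpublished (example_ptr cleared) before it is freed.
	 */
	if (rcuref_put(&p->refcnt))
		kfree_rcu(p, rcu);
}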
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
index ba9d7a3329eb..5d4fa68f26cb 100644
--- a/lib/reed_solomon/Makefile
+++ b/lib/reed_solomon/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
-
+obj-$(CONFIG_REED_SOLOMON_TEST) += test_rslib.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 121beb2f0930..805de84ae83d 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -22,6 +22,7 @@
uint16_t *index_of = rs->index_of;
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
int count = 0;
+ int num_corrected;
uint16_t msk = (uint16_t) rs->nn;
/*
@@ -39,7 +40,7 @@
/* Check length parameter for validity */
pad = nn - nroots - len;
- BUG_ON(pad < 0 || pad >= nn);
+ BUG_ON(pad < 0 || pad >= nn - nroots);
/* Does the caller provide the syndrome ? */
if (s != NULL) {
@@ -98,8 +99,7 @@
/* if syndrome is zero, data[] is a codeword and there are no
* errors to correct. So return data[] unmodified
*/
- count = 0;
- goto finish;
+ return 0;
}
decode:
@@ -185,6 +185,15 @@
if (lambda[i] != nn)
deg_lambda = i;
}
+
+ if (deg_lambda == 0) {
+ /*
+ * deg(lambda) is zero even though the syndrome is non-zero
+ * => uncorrectable error detected
+ */
+ return -EBADMSG;
+ }
+
/* Find roots of error+erasure locator polynomial by Chien search */
memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
count = 0; /* Number of roots of lambda(x) */
@@ -198,6 +207,12 @@
}
if (q != 0)
continue; /* Not a root */
+
+ if (k < pad) {
+ /* Impossible error location. Uncorrectable error. */
+ return -EBADMSG;
+ }
+
/* store root (index-form) and error location number */
root[count] = i;
loc[count] = k;
@@ -212,8 +227,7 @@
* deg(lambda) unequal to number of roots => uncorrectable
* error detected
*/
- count = -EBADMSG;
- goto finish;
+ return -EBADMSG;
}
/*
* Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
@@ -233,7 +247,9 @@
/*
* Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
* inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+ * Note: we reuse the buffer for b to store the correction pattern
*/
+ num_corrected = 0;
for (j = count - 1; j >= 0; j--) {
num1 = 0;
for (i = deg_omega; i >= 0; i--) {
@@ -241,6 +257,13 @@
num1 ^= alpha_to[rs_modnn(rs, omega[i] +
i * root[j])];
}
+
+ if (num1 == 0) {
+ /* Nothing to correct at this position */
+ b[j] = 0;
+ continue;
+ }
+
num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
den = 0;
@@ -252,30 +275,52 @@
i * root[j])];
}
}
- /* Apply error to data */
- if (num1 != 0 && loc[j] >= pad) {
- uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] +
- index_of[num2] +
- nn - index_of[den])];
- /* Store the error correction pattern, if a
- * correction buffer is available */
- if (corr) {
- corr[j] = cor;
- } else {
- /* If a data buffer is given and the
- * error is inside the message,
- * correct it */
- if (data && (loc[j] < (nn - nroots)))
- data[loc[j] - pad] ^= cor;
- }
+
+ b[j] = alpha_to[rs_modnn(rs, index_of[num1] +
+ index_of[num2] +
+ nn - index_of[den])];
+ num_corrected++;
+ }
+
+ /*
+ * We compute the syndrome of the 'error' and check that it matches
+ * the syndrome of the received word
+ */
+ for (i = 0; i < nroots; i++) {
+ tmp = 0;
+ for (j = 0; j < count; j++) {
+ if (b[j] == 0)
+ continue;
+
+ k = (fcr + i) * prim * (nn-loc[j]-1);
+ tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)];
}
+
+ if (tmp != alpha_to[s[i]])
+ return -EBADMSG;
}
-finish:
- if (eras_pos != NULL) {
- for (i = 0; i < count; i++)
- eras_pos[i] = loc[i] - pad;
+ /*
+ * Store the error correction pattern, if a
+ * correction buffer is available
+ */
+ if (corr && eras_pos) {
+ j = 0;
+ for (i = 0; i < count; i++) {
+ if (b[i]) {
+ corr[j] = b[i];
+ eras_pos[j++] = loc[i] - pad;
+ }
+ }
+ } else if (data && par) {
+ /* Apply error to data and parity */
+ for (i = 0; i < count; i++) {
+ if (loc[i] < (nn - nroots))
+ data[loc[i] - pad] ^= b[i];
+ else
+ par[loc[i] - pad - len] ^= b[i];
+ }
}
- return count;
+ return num_corrected;
}
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index e5fdc8b9e856..bbc01bad3053 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -340,7 +340,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* @data: data field of a given type
* @par: received parity data field
* @len: data length
- * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -354,7 +355,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* decoding, so the caller has to ensure that decoder invocations are
* serialized.
*
- * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
*/
int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
@@ -391,7 +393,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* @data: data field of a given type
* @par: received parity data field
* @len: data length
- * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -403,7 +406,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* decoding, so the caller has to ensure that decoder invocations are
* serialized.
*
- * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
*/
int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
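For context on the documented parameters, a minimal in-place encode/decode round trip over GF(2^8); this is an illustrative sketch assuming the init_rs()/encode_rs8()/decode_rs8()/free_rs() interfaces from <linux/rslib.h>, with the code parameters (0x11d generator polynomial, 32 roots) chosen only as an example:

#include <linux/errno.h>
#include <linux/rslib.h>
#include <linux/string.h>

#define EX_DATA_LEN	223
#define EX_NROOTS	32

static int example_rs_roundtrip(uint8_t data[EX_DATA_LEN])
{
	uint16_t par[EX_NROOTS];
	struct rs_control *rs;
	int corrected;

	/* GF(2^8), polynomial 0x11d, fcr = 0, prim = 1, 32 parity symbols */
	rs = init_rs(8, 0x11d, 0, 1, EX_NROOTS);
	if (!rs)
		return -ENOMEM;

	memset(par, 0, sizeof(par));
	encode_rs8(rs, data, EX_DATA_LEN, par, 0);

	/* ... data and/or par may be corrupted here ... */

	/* In place: no caller-provided syndrome, no erasures, no correction buffer */
	corrected = decode_rs8(rs, data, par, EX_DATA_LEN, NULL, 0, NULL, 0, NULL);

	free_rs(rs);
	return corrected;	/* >= 0: corrected symbols, -EBADMSG otherwise */
}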
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
new file mode 100644
index 000000000000..75cb1adac884
--- /dev/null
+++ b/lib/reed_solomon/test_rslib.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tests for Generic Reed Solomon encoder / decoder library
+ *
+ * Written by Ferdinand Blomqvist
+ * Based on previous work by Phil Karn, KA9Q
+ */
+#include <linux/rslib.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+enum verbosity {
+ V_SILENT,
+ V_PROGRESS,
+ V_CSUMMARY
+};
+
+enum method {
+ CORR_BUFFER,
+ CALLER_SYNDROME,
+ IN_PLACE
+};
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg)
+
+__param(int, v, V_PROGRESS, "Verbosity level");
+__param(int, ewsc, 1, "Erasures without symbol corruption");
+__param(int, bc, 1, "Test for correct behaviour beyond error correction capacity");
+
+struct etab {
+ int symsize;
+ int genpoly;
+ int fcs;
+ int prim;
+ int nroots;
+ int ntrials;
+};
+
+/* List of codes to test */
+static struct etab Tab[] = {
+ {2, 0x7, 1, 1, 1, 100000 },
+ {3, 0xb, 1, 1, 2, 100000 },
+ {3, 0xb, 1, 1, 3, 100000 },
+ {3, 0xb, 2, 1, 4, 100000 },
+ {4, 0x13, 1, 1, 4, 10000 },
+ {5, 0x25, 1, 1, 6, 1000 },
+ {6, 0x43, 3, 1, 8, 1000 },
+ {7, 0x89, 1, 1, 14, 500 },
+ {8, 0x11d, 1, 1, 30, 100 },
+ {8, 0x187, 112, 11, 32, 100 },
+ {9, 0x211, 1, 1, 33, 80 },
+ {0, 0, 0, 0, 0, 0},
+};
+
+
+struct estat {
+ int dwrong;
+ int irv;
+ int wepos;
+ int nwords;
+};
+
+struct bcstat {
+ int rfail;
+ int rsuccess;
+ int noncw;
+ int nwords;
+};
+
+struct wspace {
+ uint16_t *c; /* sent codeword */
+ uint16_t *r; /* received word */
+ uint16_t *s; /* syndrome */
+ uint16_t *corr; /* correction buffer */
+ int *errlocs;
+ int *derrlocs;
+};
+
+struct pad {
+ int mult;
+ int shift;
+};
+
+static struct pad pad_coef[] = {
+ { 0, 0 },
+ { 1, 2 },
+ { 1, 1 },
+ { 3, 2 },
+ { 1, 0 },
+};
+
+static void free_ws(struct wspace *ws)
+{
+ if (!ws)
+ return;
+
+ kfree(ws->errlocs);
+ kfree(ws->c);
+ kfree(ws);
+}
+
+static struct wspace *alloc_ws(struct rs_codec *rs)
+{
+ int nroots = rs->nroots;
+ struct wspace *ws;
+ int nn = rs->nn;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return NULL;
+
+ ws->c = kmalloc_array(2 * (nn + nroots),
+ sizeof(uint16_t), GFP_KERNEL);
+ if (!ws->c)
+ goto err;
+
+ ws->r = ws->c + nn;
+ ws->s = ws->r + nn;
+ ws->corr = ws->s + nroots;
+
+ ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
+ if (!ws->errlocs)
+ goto err;
+
+ ws->derrlocs = ws->errlocs + nn;
+ return ws;
+
+err:
+ free_ws(ws);
+ return NULL;
+}
+
+
+/*
+ * Generates a random codeword and stores it in c. Generates random errors and
+ * erasures, and stores the random word with errors in r. Erasure positions are
+ * stored in derrlocs, while errlocs has one of three values in every position:
+ *
+ * 0 if there is no error in this position;
+ * 1 if there is a symbol error in this position;
+ * 2 if there is an erasure without symbol corruption.
+ *
+ * Returns the number of corrupted symbols.
+ */
+static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
+ int len, int errs, int eras)
+{
+ int nroots = rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ int dlen = len - nroots;
+ int nn = rs->codec->nn;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ int errval;
+ int errloc;
+ int i;
+
+ /* Load c with random data and encode */
+ for (i = 0; i < dlen; i++)
+ c[i] = get_random_u32() & nn;
+
+ memset(c + dlen, 0, nroots * sizeof(*c));
+ encode_rs16(rs, c, dlen, c + dlen, 0);
+
+ /* Make copy and add errors and erasures */
+ memcpy(r, c, len * sizeof(*r));
+ memset(errlocs, 0, len * sizeof(*errlocs));
+ memset(derrlocs, 0, nroots * sizeof(*derrlocs));
+
+ /* Generating random errors */
+ for (i = 0; i < errs; i++) {
+ do {
+ /* Error value must be nonzero */
+ errval = get_random_u32() & nn;
+ } while (errval == 0);
+
+ do {
+ /* Must not choose the same location twice */
+ errloc = get_random_u32_below(len);
+ } while (errlocs[errloc] != 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ }
+
+ /* Generating random erasures */
+ for (i = 0; i < eras; i++) {
+ do {
+ /* Must not choose the same location twice */
+ errloc = get_random_u32_below(len);
+ } while (errlocs[errloc] != 0);
+
+ derrlocs[i] = errloc;
+
+ if (ewsc && get_random_u32_below(2)) {
+ /* Erasure with the symbol intact */
+ errlocs[errloc] = 2;
+ } else {
+ /* Erasure with corrupted symbol */
+ do {
+ /* Error value must be nonzero */
+ errval = get_random_u32() & nn;
+ } while (errval == 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ errs++;
+ }
+ }
+
+ return errs;
+}
+
+static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs)
+{
+ int i;
+
+ for (i = 0; i < nerrs; i++)
+ data[errlocs[i]] ^= corr[i];
+}
+
+static void compute_syndrome(struct rs_control *rsc, uint16_t *data,
+ int len, uint16_t *syn)
+{
+ struct rs_codec *rs = rsc->codec;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ int nroots = rs->nroots;
+ int prim = rs->prim;
+ int fcr = rs->fcr;
+ int i, j;
+
+ /* Calculating syndrome */
+ for (i = 0; i < nroots; i++) {
+ syn[i] = data[0];
+ for (j = 1; j < len; j++) {
+ if (syn[i] == 0) {
+ syn[i] = data[j];
+ } else {
+ syn[i] = data[j] ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]]
+ + (fcr + i) * prim)];
+ }
+ }
+ }
+
+ /* Convert to index form */
+ for (i = 0; i < nroots; i++)
+ syn[i] = rs->index_of[syn[i]];
+}
+
+/* Test up to error correction capacity */
+static void test_uc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct estat *stat,
+ struct wspace *ws, int method)
+{
+ int dlen = len - rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ uint16_t *s = ws->s;
+ int derrs, nerrs;
+ int i, j;
+
+ for (j = 0; j < trials; j++) {
+ nerrs = get_rcw_we(rs, ws, len, errs, eras);
+
+ switch (method) {
+ case CORR_BUFFER:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case CALLER_SYNDROME:
+ compute_syndrome(rs, r, len, s);
+ derrs = decode_rs16(rs, NULL, NULL, dlen,
+ s, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case IN_PLACE:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, NULL);
+ break;
+ default:
+ continue;
+ }
+
+ if (derrs != nerrs)
+ stat->irv++;
+
+ if (method != IN_PLACE) {
+ for (i = 0; i < derrs; i++) {
+ if (errlocs[derrlocs[i]] != 1)
+ stat->wepos++;
+ }
+ }
+
+ if (memcmp(r, c, len * sizeof(*r)))
+ stat->dwrong++;
+ }
+ stat->nwords += trials;
+}
+
+static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
+ int len, int trials, int method)
+{
+ static const char * const desc[] = {
+ "Testing correction buffer interface...",
+ "Testing with caller provided syndrome...",
+ "Testing in-place interface..."
+ };
+
+ struct estat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, retval;
+
+ if (v >= V_PROGRESS)
+ pr_info(" %s\n", desc[method]);
+
+ for (errs = 0; errs <= nroots / 2; errs++)
+ for (eras = 0; eras <= nroots - 2 * errs; eras++)
+ test_uc(rs, len, errs, eras, trials, &stat, ws, method);
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" Decodes wrong: %d / %d\n",
+ stat.dwrong, stat.nwords);
+ pr_info(" Wrong return value: %d / %d\n",
+ stat.irv, stat.nwords);
+ if (method != IN_PLACE)
+ pr_info(" Wrong error position: %d\n", stat.wepos);
+ }
+
+ retval = stat.dwrong + stat.wepos + stat.irv;
+ if (retval && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d decoding failures!\n", retval);
+
+ return retval;
+}
+
+static int exercise_rs(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+
+ int retval = 0;
+ int i;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing up to error correction capacity...\n");
+
+ for (i = 0; i <= IN_PLACE; i++)
+ retval |= ex_rs_helper(rs, ws, len, trials, i);
+
+ return retval;
+}
+
+/* Tests for correct behaviour beyond error correction capacity */
+static void test_bc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct bcstat *stat,
+ struct wspace *ws)
+{
+ int nroots = rs->codec->nroots;
+ int dlen = len - nroots;
+ int *derrlocs = ws->derrlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *r = ws->r;
+ int derrs, j;
+
+ for (j = 0; j < trials; j++) {
+ get_rcw_we(rs, ws, len, errs, eras);
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+
+ if (derrs >= 0) {
+ stat->rsuccess++;
+
+ /*
+ * We check that the returned word is actually a
+ * codeword. The obvious way to do this would be to
+ * compute the syndrome, but we don't want to replicate
+ * that code here. However, all the codes are in
+ * systematic form, and therefore we can encode the
+ * returned word, and see whether the parity changes or
+ * not.
+ */
+ memset(corr, 0, nroots * sizeof(*corr));
+ encode_rs16(rs, r, dlen, corr, 0);
+
+ if (memcmp(r + dlen, corr, nroots * sizeof(*corr)))
+ stat->noncw++;
+ } else {
+ stat->rfail++;
+ }
+ }
+ stat->nwords += trials;
+}
+
+static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+ struct bcstat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, cutoff;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing beyond error correction capacity...\n");
+
+ for (errs = 1; errs <= nroots; errs++) {
+ eras = nroots - 2 * errs + 1;
+ if (eras < 0)
+ eras = 0;
+
+ cutoff = nroots <= len - errs ? nroots : len - errs;
+ for (; eras <= cutoff; eras++)
+ test_bc(rs, len, errs, eras, trials, &stat, ws);
+ }
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" decoder gives up: %d / %d\n",
+ stat.rfail, stat.nwords);
+ pr_info(" decoder returns success: %d / %d\n",
+ stat.rsuccess, stat.nwords);
+ pr_info(" not a codeword: %d / %d\n",
+ stat.noncw, stat.rsuccess);
+ }
+
+ if (stat.noncw && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d silent failures!\n", stat.noncw);
+
+ return stat.noncw;
+}
+
+static int run_exercise(struct etab *e)
+{
+ int nn = (1 << e->symsize) - 1;
+ int kk = nn - e->nroots;
+ struct rs_control *rsc;
+ int retval = -ENOMEM;
+ int max_pad = kk - 1;
+ int prev_pad = -1;
+ struct wspace *ws;
+ int i;
+
+ rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
+ if (!rsc)
+ return retval;
+
+ ws = alloc_ws(rsc->codec);
+ if (!ws)
+ goto err;
+
+ retval = 0;
+ for (i = 0; i < ARRAY_SIZE(pad_coef); i++) {
+ int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;
+ int len = nn - pad;
+
+ if (pad == prev_pad)
+ continue;
+
+ prev_pad = pad;
+ if (v >= V_PROGRESS) {
+ pr_info("Testing (%d,%d)_%d code...\n",
+ len, kk - pad, nn + 1);
+ }
+
+ retval |= exercise_rs(rsc, ws, len, e->ntrials);
+ if (bc)
+ retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
+ }
+
+ free_ws(ws);
+
+err:
+ free_rs(rsc);
+ return retval;
+}
+
+static int __init test_rslib_init(void)
+{
+ int i, fail = 0;
+
+ for (i = 0; Tab[i].symsize != 0 ; i++) {
+ int retval;
+
+ retval = run_exercise(Tab + i);
+ if (retval < 0)
+ return -ENOMEM;
+
+ fail |= retval;
+ }
+
+ if (fail)
+ pr_warn("rslib: test failed\n");
+ else
+ pr_info("rslib: test ok\n");
+
+ return -EAGAIN; /* Failing init directly unloads the module */
+}
+
+static void __exit test_rslib_exit(void)
+{
+}
+
+module_init(test_rslib_init)
+module_exit(test_rslib_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ferdinand Blomqvist");
+MODULE_DESCRIPTION("Reed-Solomon library test");
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
new file mode 100644
index 000000000000..cf5609b1ca79
--- /dev/null
+++ b/lib/ref_tracker.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define pr_fmt(fmt) "ref_tracker: " fmt
+
+#include <linux/export.h>
+#include <linux/list_sort.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+
+#define REF_TRACKER_STACK_ENTRIES 16
+#define STACK_BUF_SIZE 1024
+
+struct ref_tracker {
+ struct list_head head; /* anchor into dir->list or dir->quarantine */
+ bool dead;
+ depot_stack_handle_t alloc_stack_handle;
+ depot_stack_handle_t free_stack_handle;
+};
+
+struct ref_tracker_dir_stats {
+ int total;
+ int count;
+ struct {
+ depot_stack_handle_t stack_handle;
+ unsigned int count;
+ } stacks[];
+};
+
+static struct ref_tracker_dir_stats *
+ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
+{
+ struct ref_tracker_dir_stats *stats;
+ struct ref_tracker *tracker;
+
+ stats = kmalloc(struct_size(stats, stacks, limit),
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!stats)
+ return ERR_PTR(-ENOMEM);
+ stats->total = 0;
+ stats->count = 0;
+
+ list_for_each_entry(tracker, &dir->list, head) {
+ depot_stack_handle_t stack = tracker->alloc_stack_handle;
+ int i;
+
+ ++stats->total;
+ for (i = 0; i < stats->count; ++i)
+ if (stats->stacks[i].stack_handle == stack)
+ break;
+ if (i >= limit)
+ continue;
+ if (i >= stats->count) {
+ stats->stacks[i].stack_handle = stack;
+ stats->stacks[i].count = 0;
+ ++stats->count;
+ }
+ ++stats->stacks[i].count;
+ }
+
+ return stats;
+}
+
+struct ostream {
+ char *buf;
+ int size, used;
+};
+
+#define pr_ostream(stream, fmt, args...) \
+({ \
+ struct ostream *_s = (stream); \
+\
+ if (!_s->buf) { \
+ pr_err(fmt, ##args); \
+ } else { \
+ int ret, len = _s->size - _s->used; \
+ ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
+ _s->used += min(ret, len); \
+ } \
+})
+
+static void
+__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
+ unsigned int display_limit, struct ostream *s)
+{
+ struct ref_tracker_dir_stats *stats;
+ unsigned int i = 0, skipped;
+ depot_stack_handle_t stack;
+ char *sbuf;
+
+ lockdep_assert_held(&dir->lock);
+
+ if (list_empty(&dir->list))
+ return;
+
+ stats = ref_tracker_get_stats(dir, display_limit);
+ if (IS_ERR(stats)) {
+ pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
+ dir->name, dir, stats);
+ return;
+ }
+
+ sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+
+ for (i = 0, skipped = stats->total; i < stats->count; ++i) {
+ stack = stats->stacks[i].stack_handle;
+ if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
+ sbuf[0] = 0;
+ pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
+ stats->stacks[i].count, stats->total, sbuf);
+ skipped -= stats->stacks[i].count;
+ }
+
+ if (skipped)
+ pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
+ dir->name, dir, skipped, stats->total);
+
+ kfree(sbuf);
+
+ kfree(stats);
+}
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ struct ostream os = {};
+
+ __ref_tracker_dir_pr_ostream(dir, display_limit, &os);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dir->lock, flags);
+ ref_tracker_dir_print_locked(dir, display_limit);
+ spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
+{
+ struct ostream os = { .buf = buf, .size = size };
+ unsigned long flags;
+
+ spin_lock_irqsave(&dir->lock, flags);
+ __ref_tracker_dir_pr_ostream(dir, 16, &os);
+ spin_unlock_irqrestore(&dir->lock, flags);
+
+ return os.used;
+}
+EXPORT_SYMBOL(ref_tracker_dir_snprint);
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *tracker, *n;
+ unsigned long flags;
+ bool leak = false;
+
+ dir->dead = true;
+ spin_lock_irqsave(&dir->lock, flags);
+ list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
+ list_del(&tracker->head);
+ kfree(tracker);
+ dir->quarantine_avail++;
+ }
+ if (!list_empty(&dir->list)) {
+ ref_tracker_dir_print_locked(dir, 16);
+ leak = true;
+ list_for_each_entry_safe(tracker, n, &dir->list, head) {
+ list_del(&tracker->head);
+ kfree(tracker);
+ }
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+ WARN_ON_ONCE(leak);
+ WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
+ WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
+}
+EXPORT_SYMBOL(ref_tracker_dir_exit);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+ struct ref_tracker *tracker;
+ unsigned int nr_entries;
+ gfp_t gfp_mask = gfp | __GFP_NOWARN;
+ unsigned long flags;
+
+ WARN_ON_ONCE(dir->dead);
+
+ if (!trackerp) {
+ refcount_inc(&dir->no_tracker);
+ return 0;
+ }
+ if (gfp & __GFP_DIRECT_RECLAIM)
+ gfp_mask |= __GFP_NOFAIL;
+ *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
+ if (unlikely(!tracker)) {
+ pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
+ refcount_inc(&dir->untracked);
+ return -ENOMEM;
+ }
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);
+
+ spin_lock_irqsave(&dir->lock, flags);
+ list_add(&tracker->head, &dir->list);
+ spin_unlock_irqrestore(&dir->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_alloc);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+ depot_stack_handle_t stack_handle;
+ struct ref_tracker *tracker;
+ unsigned int nr_entries;
+ unsigned long flags;
+
+ WARN_ON_ONCE(dir->dead);
+
+ if (!trackerp) {
+ refcount_dec(&dir->no_tracker);
+ return 0;
+ }
+ tracker = *trackerp;
+ if (!tracker) {
+ refcount_dec(&dir->untracked);
+ return -EEXIST;
+ }
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ stack_handle = stack_depot_save(entries, nr_entries,
+ GFP_NOWAIT | __GFP_NOWARN);
+
+ spin_lock_irqsave(&dir->lock, flags);
+ if (tracker->dead) {
+ pr_err("reference already released.\n");
+ if (tracker->alloc_stack_handle) {
+ pr_err("allocated in:\n");
+ stack_depot_print(tracker->alloc_stack_handle);
+ }
+ if (tracker->free_stack_handle) {
+ pr_err("freed in:\n");
+ stack_depot_print(tracker->free_stack_handle);
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ tracker->dead = true;
+
+ tracker->free_stack_handle = stack_handle;
+
+ list_move_tail(&tracker->head, &dir->quarantine);
+ if (!dir->quarantine_avail) {
+ tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
+ list_del(&tracker->head);
+ } else {
+ dir->quarantine_avail--;
+ tracker = NULL;
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+
+ kfree(tracker);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_free);
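
For orientation, here is a hedged sketch of the tracker life cycle the functions above implement: allocate a tracker alongside the reference, free it with the reference, and let ref_tracker_dir_exit() report anything left over. ref_tracker_dir_init() and its (dir, quarantine_count, name) arguments are assumed from include/linux/ref_tracker.h, which this diff does not show.

/* Hedged usage sketch only; struct layout and init arguments are assumed. */
#include <linux/ref_tracker.h>
#include <linux/slab.h>

struct demo_obj {
	struct ref_tracker_dir dir;
	struct ref_tracker *tr;
};

static int demo(struct demo_obj *obj)
{
	ref_tracker_dir_init(&obj->dir, 16, "demo");	/* assumed signature */

	/* take a tracked reference; records the allocation stack */
	if (ref_tracker_alloc(&obj->dir, &obj->tr, GFP_KERNEL))
		return -ENOMEM;

	/* ... use the object ... */

	/* release it; releasing the same tracker twice would WARN */
	ref_tracker_free(&obj->dir, &obj->tr);

	/* any still-tracked references are reported here */
	ref_tracker_dir_exit(&obj->dir);
	return 0;
}
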
diff --git a/lib/refcount.c b/lib/refcount.c
index 6e904af0fb3e..a207a8f22b3c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -1,41 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- * The decrements dec_and_test() and sub_and_test() also provide acquire
- * ordering on success.
- *
+ * Out-of-line refcount functions.
*/
#include <linux/mutex.h>
@@ -43,199 +8,33 @@
#include <linux/spinlock.h>
#include <linux/bug.h>
-/**
- * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- *
- * Return: false if the passed refcount is 0, true otherwise
- */
-bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- if (!val)
- return false;
-
- if (unlikely(val == UINT_MAX))
- return true;
-
- new = val + i;
- if (new < val)
- new = UINT_MAX;
-
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
- WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-EXPORT_SYMBOL(refcount_add_not_zero_checked);
-
-/**
- * refcount_add_checked - add a value to a refcount
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- */
-void refcount_add_checked(unsigned int i, refcount_t *r)
-{
- WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_add_checked);
-
-/**
- * refcount_inc_not_zero_checked - increment a refcount unless it is 0
- * @r: the refcount to increment
- *
- * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Return: true if the increment was successful, false otherwise
- */
-bool refcount_inc_not_zero_checked(refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- new = val + 1;
-
- if (!val)
- return false;
-
- if (unlikely(!new))
- return true;
-
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
- WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-EXPORT_SYMBOL(refcount_inc_not_zero_checked);
-
-/**
- * refcount_inc_checked - increment a refcount
- * @r: the refcount to increment
- *
- * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object.
- *
- * Will WARN if the refcount is 0, as this represents a possible use-after-free
- * condition.
- */
-void refcount_inc_checked(refcount_t *r)
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
- WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_inc_checked);
-
-/**
- * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
- * @i: amount to subtract from the refcount
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), but it will WARN, return false and
- * ultimately leak on underflow and will fail to decrement when saturated
- * at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_dec(), or one of its variants, should instead be used to
- * decrement a reference count.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- if (unlikely(val == UINT_MAX))
- return false;
-
- new = val - i;
- if (new > val) {
- WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
- return false;
- }
-
- } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
- if (!new) {
- smp_acquire__after_ctrl_dep();
- return true;
+ refcount_set(r, REFCOUNT_SATURATED);
+
+ switch (t) {
+ case REFCOUNT_ADD_NOT_ZERO_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_UAF:
+ REFCOUNT_WARN("addition on 0; use-after-free");
+ break;
+ case REFCOUNT_SUB_UAF:
+ REFCOUNT_WARN("underflow; use-after-free");
+ break;
+ case REFCOUNT_DEC_LEAK:
+ REFCOUNT_WARN("decrement hit 0; leaking memory");
+ break;
+ default:
+ REFCOUNT_WARN("unknown saturation event!?");
}
- return false;
-
-}
-EXPORT_SYMBOL(refcount_sub_and_test_checked);
-
-/**
- * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_dec_and_test_checked(refcount_t *r)
-{
- return refcount_sub_and_test_checked(1, r);
-}
-EXPORT_SYMBOL(refcount_dec_and_test_checked);
-
-/**
- * refcount_dec_checked - decrement a refcount
- * @r: the refcount
- *
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-void refcount_dec_checked(refcount_t *r)
-{
- WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec_checked);
+EXPORT_SYMBOL(refcount_warn_saturate);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
@@ -277,7 +76,7 @@ bool refcount_dec_not_one(refcount_t *r)
unsigned int new, val = atomic_read(&r->refs);
do {
- if (unlikely(val == UINT_MAX))
+ if (unlikely(val == REFCOUNT_SATURATED))
return true;
if (val == 1)
@@ -302,7 +101,7 @@ EXPORT_SYMBOL(refcount_dec_not_one);
* @lock: the mutex to be locked
*
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
+ * to decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
@@ -333,7 +132,7 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
* @lock: the spinlock to be locked
*
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
+ * decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
@@ -365,7 +164,7 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
 * @flags: saved IRQ-flags if the lock is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interupts.
+ * with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
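
The kernel-doc retained above describes refcount_dec_and_lock() as the way to drop the last reference and remove the object from a locked structure in one step. A minimal sketch of that pattern, with an assumed object layout, follows.

/* Hedged sketch of the release pattern; object and lock names are assumed. */
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_entry {
	refcount_t refs;
	struct list_head node;
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_put(struct demo_entry *e)
{
	/* drops the last reference and unlinks atomically w.r.t. lookups */
	if (refcount_dec_and_lock(&e->refs, &demo_lock)) {
		list_del(&e->node);
		spin_unlock(&demo_lock);
		kfree(e);
	}
}
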
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index bdb7e4cadf05..6ae2ba8e06a2 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,7 +31,7 @@
union nested_table {
union nested_table __rcu *table;
- struct rhash_lock_head *bucket;
+ struct rhash_lock_head __rcu *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
@@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
+static inline union nested_table *nested_table_top(
+ const struct bucket_table *tbl)
+{
+ /* The top-level bucket entry does not need RCU protection
+ * because it's set at the same time as tbl->nest.
+ */
+ return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
+
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
const unsigned int len = 1 << shift;
unsigned int i;
- ntbl = rcu_dereference_raw(ntbl->table);
+ ntbl = rcu_dereference_protected(ntbl->table, 1);
if (!ntbl)
return;
@@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
union nested_table *ntbl;
unsigned int i;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
for (i = 0; i < len; i++)
nested_table_free(ntbl + i, size);
@@ -213,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
}
static int rhashtable_rehash_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -222,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
struct rhash_head *head, *next, *entry;
struct rhash_head __rcu **pprev = NULL;
unsigned int new_hash;
+ unsigned long flags;
if (new_tbl->nest)
goto out;
@@ -244,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
new_hash = head_hashfn(ht, new_tbl, entry);
- rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+ flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+ SINGLE_DEPTH_NESTING);
head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
RCU_INIT_POINTER(entry->next, head);
- rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+ rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
if (pprev)
rcu_assign_pointer(*pprev, next);
@@ -266,19 +277,20 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+ struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+ unsigned long flags;
int err;
if (!bkt)
return 0;
- rht_lock(old_tbl, bkt);
+ flags = rht_lock(old_tbl, bkt);
while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
;
if (err == -ENOENT)
err = 0;
- rht_unlock(old_tbl, bkt);
+ rht_unlock(old_tbl, bkt, flags);
return err;
}
@@ -476,7 +488,7 @@ fail:
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
struct bucket_table *tbl, unsigned int hash,
const void *key, struct rhash_head *obj)
{
@@ -526,12 +538,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
return ERR_PTR(-ENOENT);
}
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
- struct rhash_lock_head **bkt,
- struct bucket_table *tbl,
- unsigned int hash,
- struct rhash_head *obj,
- void *data)
+static struct bucket_table *rhashtable_insert_one(
+ struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+ struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+ void *data)
{
struct bucket_table *new_tbl;
struct rhash_head *head;
@@ -582,7 +592,8 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
{
struct bucket_table *new_tbl;
struct bucket_table *tbl;
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
+ unsigned long flags;
unsigned int hash;
void *data;
@@ -600,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -608,7 +619,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
}
} while (!IS_ERR_OR_NULL(new_tbl));
@@ -696,7 +707,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
*
* Returns zero if successful.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
*
@@ -1164,8 +1175,8 @@ void rhashtable_destroy(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1173,7 +1184,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int subhash = hash;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
subhash >>= tbl->nest;
@@ -1193,10 +1204,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
{
- static struct rhash_lock_head *rhnull;
+ static struct rhash_lock_head __rcu *rhnull;
if (!rhnull)
INIT_RHT_NULLS_HEAD(rhnull);
@@ -1204,16 +1215,15 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
hash >>= tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
size <= (1 << shift));
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 54f57cd117c6..92c6b1fd8989 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -9,59 +9,86 @@
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
+static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
+{
+ unsigned depth = sb->depth;
+
+ sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+ if (!sb->alloc_hint)
+ return -ENOMEM;
+
+ if (depth && !sb->round_robin) {
+ int i;
+
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
+ }
+ return 0;
+}
+
+static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
+ unsigned int depth)
+{
+ unsigned hint;
+
+ hint = this_cpu_read(*sb->alloc_hint);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? get_random_u32_below(depth) : 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+
+ return hint;
+}
+
+static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int hint,
+ unsigned int nr)
+{
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sb->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sb->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+}
+
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
- unsigned long mask, val;
- bool ret = false;
- unsigned long flags;
-
- spin_lock_irqsave(&sb->map[index].swap_lock, flags);
+ unsigned long mask;
- if (!sb->map[index].cleared)
- goto out_unlock;
+ if (!READ_ONCE(map->cleared))
+ return false;
/*
* First get a stable cleared mask, setting the old mask to 0.
*/
- do {
- mask = sb->map[index].cleared;
- } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
+ mask = xchg(&map->cleared, 0);
/*
* Now clear the masked bits in our free word
*/
- do {
- val = sb->map[index].word;
- } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
-
- ret = true;
-out_unlock:
- spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
- return ret;
+ atomic_long_andnot(mask, (atomic_long_t *)&map->word);
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
+ return true;
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node)
+ gfp_t flags, int node, bool round_robin,
+ bool alloc_hint)
{
unsigned int bits_per_word;
- unsigned int i;
- if (shift < 0) {
- shift = ilog2(BITS_PER_LONG);
- /*
- * If the bitmap is small, shrink the number of bits per word so
- * we spread over a few cachelines, at least. If less than 4
- * bits, just forget about it, it's not going to work optimally
- * anyway.
- */
- if (depth >= 4) {
- while ((4U << shift) > depth)
- shift--;
- }
- }
+ if (shift < 0)
+ shift = sbitmap_calculate_shift(depth);
+
bits_per_word = 1U << shift;
if (bits_per_word > BITS_PER_LONG)
return -EINVAL;
@@ -69,21 +96,26 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
sb->shift = shift;
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+ sb->round_robin = round_robin;
if (depth == 0) {
sb->map = NULL;
return 0;
}
- sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
- if (!sb->map)
- return -ENOMEM;
+ if (alloc_hint) {
+ if (init_alloc_hint(sb, flags))
+ return -ENOMEM;
+ } else {
+ sb->alloc_hint = NULL;
+ }
- for (i = 0; i < sb->map_nr; i++) {
- sb->map[i].depth = min(depth, bits_per_word);
- depth -= sb->map[i].depth;
- spin_lock_init(&sb->map[i].swap_lock);
+ sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ if (!sb->map) {
+ free_percpu(sb->alloc_hint);
+ return -ENOMEM;
}
+
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -94,24 +126,21 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
- sbitmap_deferred_clear(sb, i);
+ sbitmap_deferred_clear(&sb->map[i]);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
-
- for (i = 0; i < sb->map_nr; i++) {
- sb->map[i].depth = min(depth, bits_per_word);
- depth -= sb->map[i].depth;
- }
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
unsigned int hint, bool wrap)
{
- unsigned int orig_hint = hint;
int nr;
+ /* don't wrap if starting from 0 */
+ wrap = wrap && hint;
+
while (1) {
nr = find_next_zero_bit(word, depth, hint);
if (unlikely(nr >= depth)) {
@@ -120,8 +149,8 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (orig_hint && hint && wrap) {
- hint = orig_hint = 0;
+ if (hint && wrap) {
+ hint = 0;
continue;
}
return -1;
@@ -138,29 +167,59 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return nr;
}
-static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
- unsigned int alloc_hint, bool round_robin)
+static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+ unsigned int depth,
+ unsigned int alloc_hint,
+ bool wrap)
{
int nr;
do {
- nr = __sbitmap_get_word(&sb->map[index].word,
- sb->map[index].depth, alloc_hint,
- !round_robin);
+ nr = __sbitmap_get_word(&map->word, depth,
+ alloc_hint, wrap);
if (nr != -1)
break;
- if (!sbitmap_deferred_clear(sb, index))
+ if (!sbitmap_deferred_clear(map))
break;
} while (1);
return nr;
}
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
+static int sbitmap_find_bit(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int index,
+ unsigned int alloc_hint,
+ bool wrap)
{
- unsigned int i, index;
+ unsigned int i;
int nr = -1;
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = sbitmap_find_bit_in_word(&sb->map[index],
+ min_t(unsigned int,
+ __map_depth(sb, index),
+ depth),
+ alloc_hint, wrap);
+
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ alloc_hint = 0;
+ if (++index >= sb->map_nr)
+ index = 0;
+ }
+
+ return nr;
+}
+
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+{
+ unsigned int index;
+
index = SB_NR_TO_INDEX(sb, alloc_hint);
/*
@@ -168,59 +227,56 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
* alloc_hint to find the right word index. No point in looping
* twice in find_next_zero_bit() for that case.
*/
- if (round_robin)
+ if (sb->round_robin)
alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
else
alloc_hint = 0;
- for (i = 0; i < sb->map_nr; i++) {
- nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
- round_robin);
- if (nr != -1) {
- nr += index << sb->shift;
- break;
- }
+ return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
+ !sb->round_robin);
+}
- /* Jump to next index. */
- alloc_hint = 0;
- if (++index >= sb->map_nr)
- index = 0;
- }
+int sbitmap_get(struct sbitmap *sb)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get(sb, hint);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth)
+static int __sbitmap_get_shallow(struct sbitmap *sb,
+ unsigned int alloc_hint,
+ unsigned long shallow_depth)
{
- unsigned int i, index;
- int nr = -1;
+ unsigned int index;
index = SB_NR_TO_INDEX(sb, alloc_hint);
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
- for (i = 0; i < sb->map_nr; i++) {
-again:
- nr = __sbitmap_get_word(&sb->map[index].word,
- min(sb->map[index].depth, shallow_depth),
- SB_NR_TO_BIT(sb, alloc_hint), true);
- if (nr != -1) {
- nr += index << sb->shift;
- break;
- }
+ return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
+}
- if (sbitmap_deferred_clear(sb, index))
- goto again;
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
+{
+ int nr;
+ unsigned int hint, depth;
- /* Jump to next index. */
- index++;
- alloc_hint = index << sb->shift;
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
- if (index >= sb->map_nr) {
- index = 0;
- alloc_hint = 0;
- }
- }
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
return nr;
}
@@ -238,52 +294,37 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
-bool sbitmap_any_bit_clear(const struct sbitmap *sb)
-{
- unsigned int i;
-
- for (i = 0; i < sb->map_nr; i++) {
- const struct sbitmap_word *word = &sb->map[i];
- unsigned long mask = word->word & ~word->cleared;
- unsigned long ret;
-
- ret = find_first_zero_bit(&mask, word->depth);
- if (ret < word->depth)
- return true;
- }
- return false;
-}
-EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
-
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
+ unsigned int word_depth = __map_depth(sb, i);
if (set)
- weight += bitmap_weight(&word->word, word->depth);
+ weight += bitmap_weight(&word->word, word_depth);
else
- weight += bitmap_weight(&word->cleared, word->depth);
+ weight += bitmap_weight(&word->cleared, word_depth);
}
return weight;
}
-static unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
- return __sbitmap_weight(sb, true);
+ return __sbitmap_weight(sb, false);
}
-static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+unsigned int sbitmap_weight(const struct sbitmap *sb)
{
- return __sbitmap_weight(sb, false);
+ return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
+EXPORT_SYMBOL_GPL(sbitmap_weight);
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
seq_printf(m, "depth=%u\n", sb->depth);
- seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
seq_printf(m, "map_nr=%u\n", sb->map_nr);
@@ -311,7 +352,10 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
for (i = 0; i < sb->map_nr; i++) {
unsigned long word = READ_ONCE(sb->map[i].word);
- unsigned int word_bits = READ_ONCE(sb->map[i].depth);
+ unsigned long cleared = READ_ONCE(sb->map[i].cleared);
+ unsigned int word_bits = __map_depth(sb, i);
+
+ word &= ~cleared;
while (word_bits > 0) {
unsigned int bits = min(8 - byte_bits, word_bits);
@@ -344,11 +388,6 @@ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
unsigned int shallow_depth;
/*
- * For each batch, we wake up one queue. We need to make sure that our
- * batch size is small enough that the full depth of the bitmap,
- * potentially limited by a shallow depth, is enough to wake up all of
- * the queues.
- *
* Each full word of the bitmap has bits_per_word bits, and there might
* be a partial word. There are depth / bits_per_word full words and
* depth % bits_per_word bits left over. In bitwise arithmetic:
@@ -374,39 +413,27 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int ret;
int i;
- ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
+ ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
+ round_robin, true);
if (ret)
return ret;
- sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
- if (!sbq->alloc_hint) {
- sbitmap_free(&sbq->sb);
- return -ENOMEM;
- }
-
- if (depth && !round_robin) {
- for_each_possible_cpu(i)
- *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
- }
-
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
atomic_set(&sbq->ws_active, 0);
+ atomic_set(&sbq->completion_cnt, 0);
+ atomic_set(&sbq->wakeup_cnt, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
return -ENOMEM;
}
- for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++)
init_waitqueue_head(&sbq->ws[i].wait);
- atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
- }
- sbq->round_robin = round_robin;
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
@@ -414,22 +441,26 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
- unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
- int i;
+ unsigned int wake_batch;
- if (sbq->wake_batch != wake_batch) {
+ wake_batch = sbq_calc_wake_batch(sbq, depth);
+ if (sbq->wake_batch != wake_batch)
WRITE_ONCE(sbq->wake_batch, wake_batch);
- /*
- * Pairs with the memory barrier in sbitmap_queue_wake_up()
- * to ensure that the batch size is updated before the wait
- * counts.
- */
- smp_mb();
- for (i = 0; i < SBQ_WAIT_QUEUES; i++)
- atomic_set(&sbq->ws[i].wait_cnt, 1);
- }
}
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users)
+{
+ unsigned int wake_batch;
+ unsigned int depth = (sbq->sb.depth + users - 1) / users;
+
+ wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+ 1, SBQ_WAKE_BATCH);
+
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
sbitmap_queue_update_wake_batch(sbq, depth);
@@ -439,62 +470,70 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
- unsigned int hint, depth;
- int nr;
-
- hint = this_cpu_read(*sbq->alloc_hint);
- depth = READ_ONCE(sbq->sb.depth);
- if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32() % depth : 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
- nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
-
- if (nr == -1) {
- /* If the map is full, a hint won't do us much good. */
- this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
- /* Only update the hint if we used it. */
- hint = nr + 1;
- if (hint >= depth - 1)
- hint = 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
-
- return nr;
+ return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth)
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset)
{
+ struct sbitmap *sb = &sbq->sb;
unsigned int hint, depth;
- int nr;
+ unsigned long index, nr;
+ int i;
- WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
+ if (unlikely(sb->round_robin))
+ return 0;
- hint = this_cpu_read(*sbq->alloc_hint);
- depth = READ_ONCE(sbq->sb.depth);
- if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32() % depth : 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
- nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
- if (nr == -1) {
- /* If the map is full, a hint won't do us much good. */
- this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
- /* Only update the hint if we used it. */
- hint = nr + 1;
- if (hint >= depth - 1)
- hint = 0;
- this_cpu_write(*sbq->alloc_hint, hint);
+ index = SB_NR_TO_INDEX(sb, hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ struct sbitmap_word *map = &sb->map[index];
+ unsigned long get_mask;
+ unsigned int map_depth = __map_depth(sb, index);
+
+ sbitmap_deferred_clear(map);
+ if (map->word == (1UL << (map_depth - 1)) - 1)
+ goto next;
+
+ nr = find_first_zero_bit(&map->word, map_depth);
+ if (nr + nr_tags <= map_depth) {
+ atomic_long_t *ptr = (atomic_long_t *) &map->word;
+ unsigned long val;
+
+ get_mask = ((1UL << nr_tags) - 1) << nr;
+ val = READ_ONCE(map->word);
+ while (!atomic_long_try_cmpxchg(ptr, &val,
+ get_mask | val))
+ ;
+ get_mask = (get_mask & ~val) >> nr;
+ if (get_mask) {
+ *offset = nr + (index << sb->shift);
+ update_alloc_hint_after_get(sb, depth, hint,
+ *offset + nr_tags - 1);
+ return get_mask;
+ }
+ }
+next:
+ /* Jump to next index. */
+ if (++index >= sb->map_nr)
+ index = 0;
}
- return nr;
+ return 0;
}
-EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
+{
+ WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
+
+ return sbitmap_get_shallow(&sbq->sb, shallow_depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
unsigned int min_shallow_depth)
@@ -504,78 +543,97 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
-static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
+static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
- int i, wake_index;
+ int i, wake_index, woken;
if (!atomic_read(&sbq->ws_active))
- return NULL;
+ return;
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
- if (waitqueue_active(&ws->wait)) {
- int o = atomic_read(&sbq->wake_index);
+ /*
+ * Advance the index before checking the current queue.
+ * It improves fairness, by ensuring the queue doesn't
+ * need to be fully emptied before trying to wake up
+ * from the next one.
+ */
+ wake_index = sbq_index_inc(wake_index);
- if (wake_index != o)
- atomic_cmpxchg(&sbq->wake_index, o, wake_index);
- return ws;
+ if (waitqueue_active(&ws->wait)) {
+ woken = wake_up_nr(&ws->wait, nr);
+ if (woken == nr)
+ break;
+ nr -= woken;
}
-
- wake_index = sbq_index_inc(wake_index);
}
- return NULL;
+ if (wake_index != atomic_read(&sbq->wake_index))
+ atomic_set(&sbq->wake_index, wake_index);
}
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
- struct sbq_wait_state *ws;
- unsigned int wake_batch;
- int wait_cnt;
+ unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+ unsigned int wakeups;
- ws = sbq_wake_ptr(sbq);
- if (!ws)
- return false;
+ if (!atomic_read(&sbq->ws_active))
+ return;
- wait_cnt = atomic_dec_return(&ws->wait_cnt);
- if (wait_cnt <= 0) {
- int ret;
+ atomic_add(nr, &sbq->completion_cnt);
+ wakeups = atomic_read(&sbq->wakeup_cnt);
- wake_batch = READ_ONCE(sbq->wake_batch);
+ do {
+ if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+ return;
+ } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+ &wakeups, wakeups + wake_batch));
- /*
- * Pairs with the memory barrier in sbitmap_queue_resize() to
- * ensure that we see the batch size update before the wait
- * count is reset.
- */
- smp_mb__before_atomic();
+ __sbitmap_queue_wake_up(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
- /*
- * For concurrent callers of this, the one that failed the
- * atomic_cmpxhcg() race should call this function again
- * to wakeup a new batch on a different 'ws'.
- */
- ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
- if (ret == wait_cnt) {
- sbq_index_atomic_inc(&sbq->wake_index);
- wake_up_nr(&ws->wait, wake_batch);
- return false;
- }
+static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
+{
+ if (likely(!sb->round_robin && tag < sb->depth))
+ data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
+}
- return true;
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags)
+{
+ struct sbitmap *sb = &sbq->sb;
+ unsigned long *addr = NULL;
+ unsigned long mask = 0;
+ int i;
+
+ smp_mb__before_atomic();
+ for (i = 0; i < nr_tags; i++) {
+ const int tag = tags[i] - offset;
+ unsigned long *this_addr;
+
+ /* since we're clearing a batch, skip the deferred map */
+ this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
+ if (!addr) {
+ addr = this_addr;
+ } else if (addr != this_addr) {
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
+ mask = 0;
+ addr = this_addr;
+ }
+ mask |= (1UL << SB_NR_TO_BIT(sb, tag));
}
- return false;
-}
+ if (mask)
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
-{
- while (__sbq_wake_up(sbq))
- ;
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq, nr_tags);
+ sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
+ tags[nr_tags - 1] - offset);
}
-EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
@@ -583,7 +641,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
/*
* Once the clear bit is set, the bit may be allocated out.
*
- * Orders READ/WRITE on the asssociated instance(such as request
+ * Orders READ/WRITE on the associated instance (such as request
* of blk_mq) by this bit for avoiding race with re-allocation,
* and its pair is the memory barrier implied in __sbitmap_get_word.
*
@@ -600,10 +658,8 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
* waiter. See the comment on waitqueue_active().
*/
smp_mb__after_atomic();
- sbitmap_queue_wake_up(sbq);
-
- if (likely(!sbq->round_robin && nr < sbq->sb.depth))
- *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
+ sbitmap_queue_wake_up(sbq, 1);
+ sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
@@ -641,7 +697,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
if (!first)
seq_puts(m, ", ");
first = false;
- seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+ seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
}
seq_puts(m, "}\n");
@@ -652,14 +708,12 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[i];
-
- seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
- atomic_read(&ws->wait_cnt),
+ seq_printf(m, "\t{.wait=%s},\n",
waitqueue_active(&ws->wait) ? "active" : "inactive");
}
seq_puts(m, "}\n");
- seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+ seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
@@ -671,8 +725,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
if (!sbq_wait->sbq) {
sbq_wait->sbq = sbq;
atomic_inc(&sbq->ws_active);
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
}
- add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index eacb82468437..68b45c82c37a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -9,6 +9,8 @@
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
/**
* sg_next - return the next scatterlist entry in a list
@@ -38,7 +40,7 @@ EXPORT_SYMBOL(sg_next);
* @sg: The scatterlist
*
* Description:
- * Allows to know how many entries are in sg, taking into acount
+ * Allows to know how many entries are in sg, taking into account
* chaining as well
*
**/
@@ -59,7 +61,7 @@ EXPORT_SYMBOL(sg_nents);
*
* Description:
* Determines the number of entries in sg that are required to meet
- * the supplied length, taking into acount chaining as well
+ * the supplied length, taking into account chaining as well
*
* Returns:
* the number of sg entries needed, negative error on failure
@@ -179,8 +181,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated first chunk
* @free_fn: Free function
+ * @num_ents: Number of entries in the table
*
* Description:
* Free an sg table previously allocated and setup with
@@ -189,16 +193,18 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- bool skip_first_chunk, sg_free_fn *free_fn)
+ unsigned int nents_first_chunk, sg_free_fn *free_fn,
+ unsigned int num_ents)
{
struct scatterlist *sgl, *next;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
if (unlikely(!table->sgl))
return;
sgl = table->sgl;
- while (table->orig_nents) {
- unsigned int alloc_size = table->orig_nents;
+ while (num_ents) {
+ unsigned int alloc_size = num_ents;
unsigned int sg_size;
/*
@@ -207,21 +213,22 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
- if (alloc_size > max_ents) {
- next = sg_chain_ptr(&sgl[max_ents - 1]);
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
}
- table->orig_nents -= sg_size;
- if (skip_first_chunk)
- skip_first_chunk = false;
+ num_ents -= sg_size;
+ if (nents_first_chunk)
+ nents_first_chunk = 0;
else
free_fn(sgl, alloc_size);
sgl = next;
+ curr_max_ents = max_ents;
}
table->sgl = NULL;
@@ -229,13 +236,27 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
EXPORT_SYMBOL(__sg_free_table);
/**
+ * sg_free_append_table - Free a previously allocated append sg table.
+ * @table: The mapped sg append table header
+ *
+ **/
+void sg_free_append_table(struct sg_append_table *table)
+{
+ __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
+ table->total_nents);
+}
+EXPORT_SYMBOL(sg_free_append_table);
+
+
+/**
* sg_free_table - Free a previously allocated sg table
* @table: The mapped sg table header
*
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
+ table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);
@@ -244,6 +265,9 @@ EXPORT_SYMBOL(sg_free_table);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no preallocated chunk was provided by the user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
*
@@ -260,10 +284,13 @@ EXPORT_SYMBOL(sg_free_table);
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, struct scatterlist *first_chunk,
- gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+ unsigned int nents_first_chunk, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+ unsigned prv_max_ents;
memset(table, 0, sizeof(*table));
@@ -279,8 +306,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
do {
unsigned int sg_size, alloc_size = left;
- if (alloc_size > max_ents) {
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
@@ -303,7 +330,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (prv)
table->nents = ++table->orig_nents;
- return -ENOMEM;
+ return -ENOMEM;
}
sg_init_table(sg, alloc_size);
@@ -314,7 +341,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
* If this is not the first mapping, chain previous part.
*/
if (prv)
- sg_chain(prv, max_ents, sg);
+ sg_chain(prv, prv_max_ents, sg);
else
table->sgl = sg;
@@ -325,6 +352,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
sg_mark_end(&sg[sg_size - 1]);
prv = sg;
+ prv_max_ents = curr_max_ents;
+ curr_max_ents = max_ents;
} while (left);
return 0;
@@ -347,66 +376,141 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- NULL, gfp_mask, sg_kmalloc);
+ NULL, 0, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
-
+ sg_free_table(table);
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
+static struct scatterlist *get_next_sg(struct sg_append_table *table,
+ struct scatterlist *cur,
+ unsigned long needed_sges,
+ gfp_t gfp_mask)
+{
+ struct scatterlist *new_sg, *next_sg;
+ unsigned int alloc_size;
+
+ if (cur) {
+ next_sg = sg_next(cur);
+ /* Check if the last entry should be kept for chaining */
+ if (!sg_is_last(next_sg) || needed_sges == 1)
+ return next_sg;
+ }
+
+ alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
+ new_sg = sg_kmalloc(alloc_size, gfp_mask);
+ if (!new_sg)
+ return ERR_PTR(-ENOMEM);
+ sg_init_table(new_sg, alloc_size);
+ if (cur) {
+ table->total_nents += alloc_size - 1;
+ __sg_chain(next_sg, new_sg);
+ } else {
+ table->sgt.sgl = new_sg;
+ table->total_nents = alloc_size;
+ }
+ return new_sg;
+}
+
+static bool pages_are_mergeable(struct page *a, struct page *b)
+{
+ if (page_to_pfn(a) != page_to_pfn(b) + 1)
+ return false;
+ if (!zone_device_pages_have_same_pgmap(a, b))
+ return false;
+ return true;
+}
+
/**
- * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
- * @sgt: The sg table header to use
- * @pages: Pointer to an array of page pointers
- * @n_pages: Number of pages in the pages array
+ * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
+ * table from an array of pages
+ * @sgt_append: The sg append table to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
- * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
+ * @max_segment: Maximum size of a scatterlist element in bytes
+ * @left_pages: Number of pages the caller still has to add after this call
* @gfp_mask: GFP allocation mask
*
- * Description:
- * Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node up to the
- * maximum size specified in @max_segment. An user may provide an offset at a
- * start and a size of valid data in a buffer specified by the page array.
- * The returned sg table is released by sg_free_table.
+ * Description:
+ * On the first call this allocates and initializes an sg table from a list of
+ * pages; on later calls it reuses the scatterlist already in @sgt_append.
+ * Contiguous ranges of the pages are squashed into a single scatterlist entry
+ * up to the maximum size specified in @max_segment. A user may provide an
+ * offset at a start and a size of valid data in a buffer specified by the
+ * page array. The returned sg table is released by sg_free_append_table().
*
* Returns:
* 0 on success, negative error on failure
+ *
+ * Notes:
+ *   If this function returns non-0 (i.e. failure), the caller must call
+ *   sg_free_append_table() to clean up any leftover allocations.
+ *
+ *   On the first call, @sgt_append must be initialized.
*/
-int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- gfp_t gfp_mask)
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
+ struct page **pages, unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask)
{
- unsigned int chunks, cur_page, seg_len, i;
- int ret;
- struct scatterlist *s;
+ unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
+ unsigned int added_nents = 0;
+ struct scatterlist *s = sgt_append->prv;
+ struct page *last_pg;
- if (WARN_ON(!max_segment || offset_in_page(max_segment)))
+ /*
+ * The algorithm below requires max_segment to be aligned to PAGE_SIZE
+ * otherwise it can overshoot.
+ */
+ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
+ if (WARN_ON(max_segment < PAGE_SIZE))
return -EINVAL;
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
+ return -EOPNOTSUPP;
+
+ if (sgt_append->prv) {
+ unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+ sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
+ if (WARN_ON(offset))
+ return -EINVAL;
+
+ /* Merge contiguous pages into the last SG */
+ prv_len = sgt_append->prv->length;
+ if (page_to_pfn(pages[0]) == next_pfn) {
+ last_pg = pfn_to_page(next_pfn - 1);
+ while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+ break;
+ sgt_append->prv->length += PAGE_SIZE;
+ last_pg = pages[0];
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
+ }
+ }
+
/* compute number of contiguous chunks */
chunks = 1;
seg_len = 0;
for (i = 1; i < n_pages; i++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
- page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+ !pages_are_mergeable(pages[i], pages[i - 1])) {
chunks++;
seg_len = 0;
}
}
- ret = sg_alloc_table(sgt, chunks, gfp_mask);
- if (unlikely(ret))
- return ret;
-
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ for (i = 0; i < chunks; i++) {
unsigned int j, chunk_size;
/* look for the end of the current chunk */
@@ -414,51 +518,82 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
for (j = cur_page + 1; j < n_pages; j++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
- page_to_pfn(pages[j]) !=
- page_to_pfn(pages[j - 1]) + 1)
+ !pages_are_mergeable(pages[j], pages[j - 1]))
break;
}
+ /* Pass how many chunks might be left */
+ s = get_next_sg(sgt_append, s, chunks - i + left_pages,
+ gfp_mask);
+ if (IS_ERR(s)) {
+ /*
+ * Adjust the entry length back to its value before this
+ * function was called.
+ */
+ if (sgt_append->prv)
+ sgt_append->prv->length = prv_len;
+ return PTR_ERR(s);
+ }
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page],
min_t(unsigned long, size, chunk_size), offset);
+ added_nents++;
size -= chunk_size;
offset = 0;
cur_page = j;
}
-
+ sgt_append->sgt.nents += added_nents;
+ sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
+ sgt_append->prv = s;
+out:
+ if (!left_pages)
+ sg_mark_end(s);
return 0;
}
-EXPORT_SYMBOL(__sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
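
A minimal usage sketch (not part of the patch; my_build_sgt(), the two page arrays and the UINT_MAX segment cap are assumptions for illustration): the first call passes the number of pages still to come in @left_pages, the final call passes 0 so the end mark gets placed, and any failure is cleaned up with sg_free_append_table().

#include <linux/scatterlist.h>
#include <linux/string.h>

static int my_build_sgt(struct sg_append_table *append,
			struct page **first, unsigned int n_first,
			unsigned long size_first,
			struct page **second, unsigned int n_second,
			unsigned long size_second)
{
	int ret;

	/* First call: the append table must start out zeroed. */
	memset(append, 0, sizeof(*append));
	ret = sg_alloc_append_table_from_pages(append, first, n_first, 0,
					       size_first, UINT_MAX,
					       n_second, GFP_KERNEL);
	if (!ret && n_second)
		ret = sg_alloc_append_table_from_pages(append, second,
						       n_second, 0, size_second,
						       UINT_MAX, 0, GFP_KERNEL);
	if (ret)
		sg_free_append_table(append);
	return ret;
}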
/**
- * sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
+ * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
+ * an array of pages and given maximum
+ * segment.
* @sgt: The sg table header to use
* @pages: Pointer to an array of page pointers
* @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist element in bytes
* @gfp_mask: GFP allocation mask
*
* Description:
* Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node. A user
- * may provide an offset at a start and a size of valid data in a buffer
- * specified by the page array. The returned sg table is released by
- * sg_free_table.
+ * ranges of the pages are squashed into a single scatterlist node up to the
+ * maximum size specified in @max_segment. A user may provide an offset at a
+ * start and a size of valid data in a buffer specified by the page array.
*
- * Returns:
+ * The returned sg table is released by sg_free_table.
+ *
+ * Returns:
* 0 on success, negative error on failure
*/
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask)
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask)
{
- return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
- SCATTERLIST_MAX_SEGMENT, gfp_mask);
+ struct sg_append_table append = {};
+ int err;
+
+ err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
+ size, max_segment, 0, gfp_mask);
+ if (err) {
+ sg_free_append_table(&append);
+ return err;
+ }
+ memcpy(sgt, &append.sgt, sizeof(*sgt));
+ WARN_ON(append.total_nents != sgt->orig_nents);
+ return 0;
}
-EXPORT_SYMBOL(sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
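
For comparison, a sketch of the non-append wrapper (my_map_pages() is a hypothetical caller and SZ_64K is just an example segment cap, not a value taken from the patch):

#include <linux/scatterlist.h>
#include <linux/sizes.h>

static int my_map_pages(struct sg_table *sgt, struct page **pages,
			unsigned int n_pages, unsigned long size)
{
	/* Cap each scatterlist element at 64 KiB (example value). */
	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, 0,
						 size, SZ_64K, GFP_KERNEL);
}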
#ifdef CONFIG_SGL_ALLOC
@@ -494,7 +629,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
nalloc++;
}
sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
- (gfp & ~GFP_DMA) | __GFP_ZERO);
+ gfp & ~GFP_DMA);
if (!sgl)
return NULL;
@@ -504,7 +639,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
elem_len = min_t(u64, length, PAGE_SIZE << order);
page = alloc_pages(gfp, order);
if (!page) {
- sgl_free(sgl);
+ sgl_free_order(sgl, order);
return NULL;
}
@@ -654,6 +789,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next);
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
+ * @flags: sg iterator flags
*
* Description:
* Starts mapping iterator @miter.
@@ -706,8 +842,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
* stops @miter.
*
* Context:
- * Don't care if @miter is stopped, or not proceeded yet.
- * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
+ * Don't care.
*
* Returns:
* true if @miter contains the valid mapping. false if end of sg
@@ -743,8 +878,7 @@ EXPORT_SYMBOL(sg_miter_skip);
* @miter->addr and @miter->length point to the current mapping.
*
* Context:
- * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
- * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
+ * May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
@@ -784,8 +918,7 @@ EXPORT_SYMBOL(sg_miter_next);
* need to be released during iteration.
*
* Context:
- * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
- * otherwise.
+ * Don't care otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
@@ -796,12 +929,11 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if ((miter->__flags & SG_MITER_TO_SG) &&
- !PageSlab(miter->page))
- flush_kernel_dcache_page(miter->page);
+ if (miter->__flags & SG_MITER_TO_SG)
+ flush_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON_ONCE(preemptible());
+ WARN_ON_ONCE(!pagefault_disabled());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
@@ -822,7 +954,7 @@ EXPORT_SYMBOL(sg_miter_stop);
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
* @to_buffer: transfer direction (true == from an sg list to a
- * buffer, false == from a buffer to an sg list
+ * buffer, false == from a buffer to an sg list)
*
* Returns the number of copied bytes.
*
@@ -842,7 +974,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
sg_miter_start(&miter, sgl, nents, sg_flags);
if (!sg_miter_skip(&miter, skip))
- return false;
+ return 0;
while ((offset < buflen) && sg_miter_next(&miter)) {
unsigned int len;
@@ -967,3 +1099,270 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
+
+/*
+ * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
+ * iterators, and add them to the scatterlist.
+ */
+static ssize_t extract_user_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct page **pages;
+ unsigned int npages;
+ ssize_t ret = 0, res;
+ size_t len, off;
+
+ /* We decant the page list into the tail of the scatterlist */
+ pages = (void *)sgtable->sgl +
+ array_size(sg_max, sizeof(struct scatterlist));
+ pages -= sg_max;
+
+ do {
+ res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+ extraction_flags, &off);
+ if (res < 0)
+ goto failed;
+
+ len = res;
+ maxsize -= len;
+ ret += len;
+ npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
+ sg_max -= npages;
+
+ for (; npages > 0; npages--) {
+ struct page *page = *pages;
+ size_t seg = min_t(size_t, PAGE_SIZE - off, len);
+
+ *pages++ = NULL;
+ sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ len -= seg;
+ off = 0;
+ }
+ } while (maxsize > 0 && sg_max > 0);
+
+ return ret;
+
+failed:
+ while (sgtable->nents > sgtable->orig_nents)
+ unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+ return res;
+}
+
+/*
+ * Extract up to sg_max pages from a BVEC-type iterator and add them to the
+ * scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct bio_vec *bv = iter->bvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ size_t off, len;
+
+ len = bv[i].bv_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ len = min_t(size_t, maxsize, len - start);
+ off = bv[i].bv_offset + start;
+
+ sg_set_page(sg, bv[i].bv_page, len, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ ret += len;
+ maxsize -= len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max pages from a KVEC-type iterator and add them to the
+ * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or
+ * static buffers. The pages are not pinned.
+ */
+static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct kvec *kv = iter->kvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ struct page *page;
+ unsigned long kaddr;
+ size_t off, len, seg;
+
+ len = kv[i].iov_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ kaddr = (unsigned long)kv[i].iov_base + start;
+ off = kaddr & ~PAGE_MASK;
+ len = min_t(size_t, maxsize, len - start);
+ kaddr &= PAGE_MASK;
+
+ maxsize -= len;
+ ret += len;
+ do {
+ seg = min_t(size_t, len, PAGE_SIZE - off);
+ if (is_vmalloc_or_module_addr((void *)kaddr))
+ page = vmalloc_to_page((void *)kaddr);
+ else
+ page = virt_to_page((void *)kaddr);
+
+ sg_set_page(sg, page, len, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ len -= seg;
+ kaddr += PAGE_SIZE;
+ off = 0;
+ } while (len > 0 && sg_max > 0);
+
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max folios from an XARRAY-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct xarray *xa = iter->xarray;
+ struct folio *folio;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ ssize_t ret = 0;
+ size_t offset, len;
+ XA_STATE(xas, xa, index);
+
+ rcu_read_lock();
+
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start);
+ len = min_t(size_t, maxsize, folio_size(folio) - offset);
+
+ sg_set_page(sg, folio_page(folio, 0), len, offset);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ maxsize -= len;
+ ret += len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ }
+
+ rcu_read_unlock();
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/**
+ * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
+ * @iter: The iterator to extract from
+ * @maxsize: The amount of iterator to copy
+ * @sgtable: The scatterlist table to fill in
+ * @sg_max: Maximum number of elements in @sgtable that may be filled
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * add them to a scatterlist that refers to all of those bits, to a maximum
+ * addition of @sg_max elements.
+ *
+ * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
+ * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
+ * and DISCARD-type are not supported.
+ *
+ * No end mark is placed on the scatterlist; that's left to the caller.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * If successful, @sgtable->nents is updated to include the number of elements
+ * added and the number of bytes added is returned. @sgtable->orig_nents is
+ * left unaltered.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ if (maxsize == 0)
+ return 0;
+
+ switch (iov_iter_type(iter)) {
+ case ITER_UBUF:
+ case ITER_IOVEC:
+ return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_BVEC:
+ return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_KVEC:
+ return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_XARRAY:
+ return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ default:
+ pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(extract_iter_to_sg);
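
A hedged usage sketch for the new extractor (MY_MAX_SGS, my_extract() and the caller-provided sgl are assumptions): the scatterlist array must have room for sg_max entries, nents starts at 0, no end mark is placed by the helper, and the caller later releases the pages according to iov_iter_extract_mode(), as the kerneldoc above states.

#include <linux/scatterlist.h>
#include <linux/uio.h>

#define MY_MAX_SGS 16	/* illustrative element budget */

static ssize_t my_extract(struct iov_iter *iter, size_t len,
			  struct scatterlist *sgl)
{
	struct sg_table sgt = { .sgl = sgl, .nents = 0, .orig_nents = 0 };
	ssize_t ret;

	sg_init_table(sgl, MY_MAX_SGS);
	ret = extract_iter_to_sg(iter, len, &sgt, MY_MAX_SGS, 0);
	if (ret > 0)
		sg_mark_end(&sgt.sgl[sgt.nents - 1]);
	return ret;
}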
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index bd807f545a9d..f3f3436d60a9 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -13,16 +13,26 @@
* seq_buf_init() more than once to reset the seq_buf to start
* from scratch.
*/
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/minmax.h>
+#include <linux/printk.h>
#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
/**
* seq_buf_can_fit - can the new data fit in the current buffer?
* @s: the seq_buf descriptor
* @len: The length to see if it can fit in the current buffer
*
- * Returns true if there's enough unused space in the seq_buf buffer
+ * Returns: true if there's enough unused space in the seq_buf buffer
* to fit the amount of new data according to @len.
*/
static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
@@ -35,7 +45,7 @@ static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
* @m: the seq_file descriptor that is the destination
* @s: the seq_buf descriptor that is the source.
*
- * Returns zero on success, non zero otherwise
+ * Returns: zero on success, non-zero otherwise.
*/
int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
{
@@ -50,9 +60,9 @@ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
* @fmt: printf format string
* @args: va_list of arguments from a printf() type function
*
- * Writes a vnprintf() format into the sequencce buffer.
+ * Writes a vnprintf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
{
@@ -78,7 +88,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
*
* Writes a printf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
{
@@ -91,6 +101,37 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
return ret;
}
+EXPORT_SYMBOL_GPL(seq_buf_printf);
+
+/**
+ * seq_buf_do_printk - printk() seq_buf line by line
+ * @s: seq_buf descriptor
+ * @lvl: printk level
+ *
+ * printk()-s a multi-line sequential buffer line by line. The function
+ * makes sure that the buffer in @s is NUL-terminated and safe to read
+ * as a string.
+ */
+void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
+{
+ const char *start, *lf;
+
+ if (s->size == 0 || s->len == 0)
+ return;
+
+ start = seq_buf_str(s);
+ while ((lf = strchr(start, '\n'))) {
+ int len = lf - start + 1;
+
+ printk("%s%.*s", lvl, len, start);
+ start = ++lf;
+ }
+
+ /* No trailing LF */
+ if (start < s->buffer + s->len)
+ printk("%s%s\n", lvl, start);
+}
+EXPORT_SYMBOL_GPL(seq_buf_do_printk);
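
A short sketch of the new helper in use (my_dump_status() and the buffer size are illustrative): lines separated by '\n' in the seq_buf come out as separate printk() lines at the requested level.

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void my_dump_status(void)
{
	char buf[256];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "state=%s\n", "ready");
	seq_buf_printf(&s, "queued=%d\n", 3);
	seq_buf_do_printk(&s, KERN_INFO);
}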
#ifdef CONFIG_BINARY_PRINTF
/**
@@ -108,7 +149,7 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
* This function will take the format and the binary array and finish
* the conversion into the ASCII string within the buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
{
@@ -136,7 +177,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
*
* Copy a simple string into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_puts(struct seq_buf *s, const char *str)
{
@@ -156,6 +197,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_puts);
/**
* seq_buf_putc - sequence printing of simple character
@@ -164,7 +206,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
*
* Copy a single character into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putc(struct seq_buf *s, unsigned char c)
{
@@ -177,9 +219,10 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_putc);
/**
- * seq_buf_putmem - write raw data into the sequenc buffer
+ * seq_buf_putmem - write raw data into the sequence buffer
* @s: seq_buf descriptor
* @mem: The raw memory to copy into the buffer
* @len: The length of the raw memory to copy (in bytes)
@@ -188,7 +231,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
* buffer and a strcpy() would not work. Using this function allows
* for such cases.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
{
@@ -216,7 +259,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
* raw memory into the buffer it writes its ASCII representation of it
* in hex characters.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len)
@@ -228,8 +271,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
WARN_ON(s->size == 0);
+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
while (len) {
- start_len = min(len, HEX_CHARS - 1);
+ start_len = min(len, MAX_MEMHEX_BYTES);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
@@ -242,12 +287,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
break;
/* j increments twice per loop */
- len -= j / 2;
hex[j++] = ' ';
seq_buf_putmem(s, hex, j);
if (seq_buf_has_overflowed(s))
return -1;
+
+ len -= start_len;
+ data += start_len;
}
return 0;
}
@@ -260,7 +307,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
*
* Write a path name into the sequence buffer.
*
- * Returns the number of written bytes on success, -1 on overflow
+ * Returns: the number of written bytes on success, -1 on overflow.
*/
int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
{
@@ -284,26 +331,28 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
}
/**
- * seq_buf_to_user - copy the squence buffer to user space
+ * seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
+ * @start: The first byte in the buffer to copy
* @cnt: The amount to copy
*
* Copies the sequence buffer into the userspace memory pointed to
- * by @ubuf. It starts from the last read position (@s->readpos)
- * and writes up to @cnt characters or till it reaches the end of
- * the content in the buffer (@s->len), which ever comes first.
+ * by @ubuf. It starts from @start and writes up to @cnt characters
+ * or until it reaches the end of the content in the buffer (@s->len),
+ * whichever comes first.
*
+ * Returns:
* On success, it returns a positive number of the number of bytes
* it copied.
*
* On failure it returns -EBUSY if all of the content in the
* sequence has been already read, which includes nothing in the
- * sequence (@s->len == @s->readpos).
+ * sequence (@s->len == @start).
*
* Returns -EFAULT if the copy to userspace fails.
*/
-int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
+int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
{
int len;
int ret;
@@ -313,18 +362,77 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
len = seq_buf_used(s);
- if (len <= s->readpos)
+ if (len <= start)
return -EBUSY;
- len -= s->readpos;
+ len -= start;
if (cnt > len)
cnt = len;
- ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+ ret = copy_to_user(ubuf, s->buffer + start, cnt);
if (ret == cnt)
return -EFAULT;
- cnt -= ret;
+ return cnt - ret;
+}
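
With @start replacing the removed s->readpos, position tracking moves to the caller. A sketch of a read() handler doing that (my_read() and my_seq_buf are hypothetical; -EBUSY is translated into the usual end-of-file return):

#include <linux/fs.h>
#include <linux/seq_buf.h>

static struct seq_buf my_seq_buf;	/* filled elsewhere (illustrative) */

static ssize_t my_read(struct file *file, char __user *ubuf,
		       size_t count, loff_t *ppos)
{
	int ret = seq_buf_to_user(&my_seq_buf, ubuf, *ppos, count);

	if (ret == -EBUSY)
		return 0;	/* everything has already been read */
	if (ret > 0)
		*ppos += ret;
	return ret;
}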
+
+/**
+ * seq_buf_hex_dump - print formatted hex dump into the sequence buffer
+ * @s: seq_buf descriptor
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * Function is an analogue of print_hex_dump() and thus has similar interface.
+ *
+ * linebuf size is maximal length for one line.
+ * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
+ * separating space
+ * 2 - spaces separating hex dump and ASCII representation
+ * 32 - ASCII representation
+ * 1 - terminating '\0'
+ *
+ * Returns: zero on success, -1 on overflow.
+ */
+int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ int ret;
- s->readpos += cnt;
- return cnt;
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ ret = seq_buf_printf(s, "%s%p: %s\n",
+ prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ ret = seq_buf_printf(s, "%s%.8x: %s\n",
+ prefix_str, i, linebuf);
+ break;
+ default:
+ ret = seq_buf_printf(s, "%s%s\n", prefix_str, linebuf);
+ break;
+ }
+ if (ret)
+ return ret;
+ }
+ return 0;
}
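
An illustrative call of the new hex-dump helper (the data, prefix string and group size are example values, not taken from the patch):

#include <linux/printk.h>
#include <linux/seq_buf.h>
#include <linux/types.h>

static void my_hex_example(struct seq_buf *s)
{
	static const u8 data[] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	/* 16 bytes per row, 1-byte groups, offset prefix, ASCII column. */
	seq_buf_hex_dump(s, "dump: ", DUMP_PREFIX_OFFSET, 16, 1,
			 data, sizeof(data), true);
}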
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..9bfe60ca3f37 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>
@@ -70,18 +70,28 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
/**
* sg_free_table_chained - Free a previously mapped sg table
* @table: The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ * sg_alloc_table_chained
*
* Description:
* Free an sg table previously allocated and setup with
* sg_alloc_table_chained().
*
+ * @nents_first_chunk has to be the same value that was passed
+ * to sg_alloc_table_chained().
+ *
**/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+ unsigned nents_first_chunk)
{
- if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+ if (table->orig_nents <= nents_first_chunk)
return;
- __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+ if (nents_first_chunk == 1)
+ nents_first_chunk = 0;
+
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free,
+ table->orig_nents);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);
@@ -90,31 +100,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in @first_chunk
*
* Description:
* Allocate and chain SGLs in an sg table. If @nents@ is larger than
- * SG_CHUNK_SIZE a chained sg table will be setup.
+ * @nents_first_chunk a chained sg table will be set up. @first_chunk is
+ * ignored if @nents_first_chunk <= 1 because the caller expects the SGL to
+ * point at a non-chained SGL.
*
**/
int sg_alloc_table_chained(struct sg_table *table, int nents,
- struct scatterlist *first_chunk)
+ struct scatterlist *first_chunk, unsigned nents_first_chunk)
{
int ret;
BUG_ON(!nents);
- if (first_chunk) {
- if (nents <= SG_CHUNK_SIZE) {
+ if (first_chunk && nents_first_chunk) {
+ if (nents <= nents_first_chunk) {
table->nents = table->orig_nents = nents;
sg_init_table(table->sgl, nents);
return 0;
}
}
+ /* The caller expects the first SGL to contain a real entry */
+ if (nents_first_chunk <= 1) {
+ first_chunk = NULL;
+ nents_first_chunk = 0;
+ }
+
ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
- first_chunk, GFP_ATOMIC, sg_pool_alloc);
+ first_chunk, nents_first_chunk,
+ GFP_ATOMIC, sg_pool_alloc);
if (unlikely(ret))
- sg_free_table_chained(table, (bool)first_chunk);
+ sg_free_table_chained(table, nents_first_chunk);
return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
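
A sketch of the updated chained-table API, modelled loosely on how SCSI embeds a first chunk (struct my_cmd and the helpers are hypothetical): the caller points table.sgl at the preallocated chunk and passes the same nents_first_chunk value to both the allocate and the free path.

#include <linux/scatterlist.h>

struct my_cmd {
	struct sg_table table;
	struct scatterlist first_chunk[SG_CHUNK_SIZE];
};

static int my_map(struct my_cmd *cmd, int nents)
{
	cmd->table.sgl = cmd->first_chunk;
	return sg_alloc_table_chained(&cmd->table, nents,
				      cmd->first_chunk, SG_CHUNK_SIZE);
}

static void my_unmap(struct my_cmd *cmd)
{
	sg_free_table_chained(&cmd->table, SG_CHUNK_SIZE);
}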
@@ -157,16 +177,4 @@ cleanup_sdb:
return -ENOMEM;
}
-static __exit void sg_pool_exit(void)
-{
- int i;
-
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct sg_pool *sgp = sg_pools + i;
- mempool_destroy(sgp->pool);
- kmem_cache_destroy(sgp->slab);
- }
-}
-
-module_init(sg_pool_init);
-module_exit(sg_pool_exit);
+subsys_initcall(sg_pool_init);
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 9982c63d1063..60a0babebf2e 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -176,11 +176,13 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
* The order of these 3 calls is important and should be kept.
*/
sg_split_phys(splitters, nb_splits);
- ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
- split_sizes, splitters, true);
- if (ret < 0)
- goto err;
- sg_split_mapped(splitters, nb_splits);
+ if (in_mapped_nents) {
+ ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+ split_sizes, splitters, true);
+ if (ret < 0)
+ goto err;
+ sg_split_mapped(splitters, nb_splits);
+ }
for (i = 0; i < nb_splits; i++) {
out[i] = splitters[i].out_sg;
diff --git a/lib/sha256.c b/lib/sha256.c
deleted file mode 100644
index d9af148d4349..000000000000
--- a/lib/sha256.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256, as specified in
- * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
- *
- * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2014 Red Hat Inc.
- */
-
-#include <linux/bitops.h>
-#include <linux/sha256.h>
-#include <linux/string.h>
-#include <asm/byteorder.h>
-
-static inline u32 Ch(u32 x, u32 y, u32 z)
-{
- return z ^ (x & (y ^ z));
-}
-
-static inline u32 Maj(u32 x, u32 y, u32 z)
-{
- return (x & y) | (z & (x | y));
-}
-
-#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
-#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
-#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
-#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
-
-static inline void LOAD_OP(int I, u32 *W, const u8 *input)
-{
- W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
-}
-
-static inline void BLEND_OP(int I, u32 *W)
-{
- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
-}
-
-static void sha256_transform(u32 *state, const u8 *input)
-{
- u32 a, b, c, d, e, f, g, h, t1, t2;
- u32 W[64];
- int i;
-
- /* load the input */
- for (i = 0; i < 16; i++)
- LOAD_OP(i, W, input);
-
- /* now blend */
- for (i = 16; i < 64; i++)
- BLEND_OP(i, W);
-
- /* load the state into our registers */
- a = state[0]; b = state[1]; c = state[2]; d = state[3];
- e = state[4]; f = state[5]; g = state[6]; h = state[7];
-
- /* now iterate */
- t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
-
- state[0] += a; state[1] += b; state[2] += c; state[3] += d;
- state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-
- /* clear any sensitive info... */
- a = b = c = d = e = f = g = h = t1 = t2 = 0;
- memset(W, 0, 64 * sizeof(u32));
-}
-
-int sha256_init(struct sha256_state *sctx)
-{
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
-
- return 0;
-}
-
-int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
-{
- unsigned int partial, done;
- const u8 *src;
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) > 63) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data, done + 64);
- src = sctx->buf;
- }
-
- do {
- sha256_transform(sctx->state, src);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
-
- partial = 0;
- }
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
-}
-
-int sha256_final(struct sha256_state *sctx, u8 *out)
-{
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- unsigned int index, pad_len;
- int i;
- static const u8 padding[64] = { 0x80, };
-
- /* Save number of bits */
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
- sha256_update(sctx, padding, pad_len);
-
- /* Append length (before padding) */
- sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
diff --git a/lib/show_mem.c b/lib/show_mem.c
deleted file mode 100644
index 5c86ef4c899f..000000000000
--- a/lib/show_mem.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic show_mem() implementation
- *
- * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
- */
-
-#include <linux/mm.h>
-#include <linux/quicklist.h>
-#include <linux/cma.h>
-
-void show_mem(unsigned int filter, nodemask_t *nodemask)
-{
- pg_data_t *pgdat;
- unsigned long total = 0, reserved = 0, highmem = 0;
-
- printk("Mem-Info:\n");
- show_free_areas(filter, nodemask);
-
- for_each_online_pgdat(pgdat) {
- int zoneid;
-
- for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
- struct zone *zone = &pgdat->node_zones[zoneid];
- if (!populated_zone(zone))
- continue;
-
- total += zone->present_pages;
- reserved += zone->present_pages - zone_managed_pages(zone);
-
- if (is_highmem_idx(zoneid))
- highmem += zone->present_pages;
- }
- }
-
- printk("%lu pages RAM\n", total);
- printk("%lu pages HighMem/MovableOnly\n", highmem);
- printk("%lu pages reserved\n", reserved);
-#ifdef CONFIG_CMA
- printk("%lu pages cma reserved\n", totalcma_pages);
-#endif
-#ifdef CONFIG_QUICKLIST
- printk("%lu pages in pagetable cache\n",
- quicklist_total_size());
-#endif
-#ifdef CONFIG_MEMORY_FAILURE
- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
-#endif
-}
diff --git a/lib/siphash.c b/lib/siphash.c
index c47bb6ff2149..15bc5b6f368c 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -18,19 +17,13 @@
#include <asm/word-at-a-time.h>
#endif
-#define SIPROUND \
- do { \
- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
- } while (0)
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PREAMBLE(len) \
- u64 v0 = 0x736f6d6570736575ULL; \
- u64 v1 = 0x646f72616e646f6dULL; \
- u64 v2 = 0x6c7967656e657261ULL; \
- u64 v3 = 0x7465646279746573ULL; \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
u64 b = ((u64)(len)) << 56; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
@@ -49,6 +42,7 @@
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -68,11 +62,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -80,8 +74,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -101,11 +95,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -113,7 +107,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
-#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +243,7 @@ EXPORT_SYMBOL(siphash_3u32);
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -268,11 +262,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -280,8 +274,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -301,11 +295,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -313,7 +307,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -389,19 +382,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
}
EXPORT_SYMBOL(hsiphash_4u32);
#else
-#define HSIPROUND \
- do { \
- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
- } while (0)
+#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
#define HPREAMBLE(len) \
- u32 v0 = 0; \
- u32 v1 = 0; \
- u32 v2 = 0x6c796765U; \
- u32 v3 = 0x74656462U; \
+ u32 v0 = HSIPHASH_CONST_0; \
+ u32 v1 = HSIPHASH_CONST_1; \
+ u32 v2 = HSIPHASH_CONST_2; \
+ u32 v3 = HSIPHASH_CONST_3; \
u32 b = ((u32)(len)) << 24; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
@@ -418,6 +405,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
HSIPROUND; \
return v1 ^ v3;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
@@ -431,15 +419,15 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16; /* fall through */
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -454,14 +442,13 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16; /* fall through */
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
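
The hunks above replace the open-coded HSIPROUND macro and the magic initial constants with HSIPHASH_PERMUTATION() and the HSIPHASH_CONST_* definitions shared with the header, so the permutation has a single source of truth. Based on the macro body removed above, the shared helper is expected to expand to the same rotation sequence; a sketch for reference (the authoritative definitions live in include/linux/siphash.h, which this diff does not show):

/* Sketch only: mirrors the HSIPROUND body removed above. */
#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol32((b), 5),  (b) ^= (a), (a) = rol32((a), 16), \
	(c) += (d), (d) = rol32((d), 8),  (d) ^= (c), \
	(a) += (d), (d) = rol32((d), 7),  (d) ^= (a), \
	(c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))

/* The constants match the values removed from HPREAMBLE above. */
#define HSIPHASH_CONST_0	0U
#define HSIPHASH_CONST_1	0U
#define HSIPHASH_CONST_2	0x6c796765U
#define HSIPHASH_CONST_3	0x74656462U
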
diff --git a/lib/test_siphash.c b/lib/siphash_kunit.c
index a6d854d933bf..a3c697e8be35 100644
--- a/lib/test_siphash.c
+++ b/lib/siphash_kunit.c
@@ -1,8 +1,7 @@
-/* Test cases for siphash.c
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
- * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+ * Test cases for siphash.c
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -14,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/test.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -110,114 +110,88 @@ static const u32 test_vectors_hsiphash[64] = {
};
#endif
-static int __init siphash_test_init(void)
+#define chk(hash, vector, fmt...) \
+ KUNIT_EXPECT_EQ_MSG(test, hash, vector, fmt)
+
+static void siphash_test(struct kunit *test)
{
u8 in[64] __aligned(SIPHASH_ALIGNMENT);
u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
u8 i;
- int ret = 0;
for (i = 0; i < 64; ++i) {
in[i] = i;
in_unaligned[i + 1] = i;
- if (siphash(in, i, &test_key_siphash) !=
- test_vectors_siphash[i]) {
- pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
- test_vectors_siphash[i]) {
- pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (hsiphash(in, i, &test_key_hsiphash) !=
- test_vectors_hsiphash[i]) {
- pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
- test_vectors_hsiphash[i]) {
- pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- }
- if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
- test_vectors_siphash[8]) {
- pr_info("siphash self-test 1u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
- &test_key_siphash) != test_vectors_siphash[16]) {
- pr_info("siphash self-test 2u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
- 0x1716151413121110ULL, &test_key_siphash) !=
- test_vectors_siphash[24]) {
- pr_info("siphash self-test 3u64: FAIL\n");
- ret = -EINVAL;
+ chk(siphash(in, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test aligned %u: FAIL", i + 1);
+ chk(siphash(in_unaligned + 1, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test unaligned %u: FAIL", i + 1);
+ chk(hsiphash(in, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test aligned %u: FAIL", i + 1);
+ chk(hsiphash(in_unaligned + 1, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test unaligned %u: FAIL", i + 1);
}
- if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ chk(siphash_1u64(0x0706050403020100ULL, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 1u64: FAIL");
+ chk(siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 2u64: FAIL");
+ chk(siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash),
+ test_vectors_siphash[24],
+ "siphash self-test 3u64: FAIL");
+ chk(siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
- &test_key_siphash) != test_vectors_siphash[32]) {
- pr_info("siphash self-test 4u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_1u32(0x03020100U, &test_key_siphash) !=
- test_vectors_siphash[4]) {
- pr_info("siphash self-test 1u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
- test_vectors_siphash[8]) {
- pr_info("siphash self-test 2u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_3u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, &test_key_siphash) !=
- test_vectors_siphash[12]) {
- pr_info("siphash self-test 3u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_4u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
- test_vectors_siphash[16]) {
- pr_info("siphash self-test 4u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
- test_vectors_hsiphash[4]) {
- pr_info("hsiphash self-test 1u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
- test_vectors_hsiphash[8]) {
- pr_info("hsiphash self-test 2u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_3u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, &test_key_hsiphash) !=
- test_vectors_hsiphash[12]) {
- pr_info("hsiphash self-test 3u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_4u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
- test_vectors_hsiphash[16]) {
- pr_info("hsiphash self-test 4u32: FAIL\n");
- ret = -EINVAL;
- }
- if (!ret)
- pr_info("self-tests: pass\n");
- return ret;
+ &test_key_siphash),
+ test_vectors_siphash[32],
+ "siphash self-test 4u64: FAIL");
+ chk(siphash_1u32(0x03020100U, &test_key_siphash),
+ test_vectors_siphash[4],
+ "siphash self-test 1u32: FAIL");
+ chk(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 2u32: FAIL");
+ chk(siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash),
+ test_vectors_siphash[12],
+ "siphash self-test 3u32: FAIL");
+ chk(siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 4u32: FAIL");
+ chk(hsiphash_1u32(0x03020100U, &test_key_hsiphash),
+ test_vectors_hsiphash[4],
+ "hsiphash self-test 1u32: FAIL");
+ chk(hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash),
+ test_vectors_hsiphash[8],
+ "hsiphash self-test 2u32: FAIL");
+ chk(hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash),
+ test_vectors_hsiphash[12],
+ "hsiphash self-test 3u32: FAIL");
+ chk(hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash),
+ test_vectors_hsiphash[16],
+ "hsiphash self-test 4u32: FAIL");
}
-static void __exit siphash_test_exit(void)
-{
-}
+static struct kunit_case siphash_test_cases[] = {
+ KUNIT_CASE(siphash_test),
+ {}
+};
+
+static struct kunit_suite siphash_test_suite = {
+ .name = "siphash",
+ .test_cases = siphash_test_cases,
+};
-module_init(siphash_test_init);
-module_exit(siphash_test_exit);
+kunit_test_suite(siphash_test_suite);
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
MODULE_LICENSE("Dual BSD/GPL");
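
The vectors above exercise the fixed-width helpers (siphash_Nu32/Nu64 and the hsiphash counterparts) exactly as ordinary callers use them for keyed hashing. A minimal usage sketch, with the key variable and function name invented for illustration:

#include <linux/once.h>
#include <linux/siphash.h>

/* Illustrative only: the key and function name are invented. */
static siphash_key_t example_key;

static u64 example_hash_pair(u32 a, u32 b)
{
	/* Generate the key once, lazily, on first use. */
	get_random_once(&example_key, sizeof(example_key));
	return siphash_2u32(a, b, &example_key);
}
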
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000000..d4a3730b08fa
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+/*
+ * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, so that objects are not
+ * allocated from the KFENCE pool, where a corruption could be caught by
+ * both our test and the KFENCE sanity check.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+ unsigned int size, slab_flags_t flags)
+{
+ struct kmem_cache *s = kmem_cache_create(name, size, 0,
+ (flags | SLAB_NO_USER_FLAGS), NULL);
+ s->flags |= SLAB_SKIP_KFENCE;
+ return s;
+}
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = 0x12;
+
+ /*
+ * Expecting three errors.
+	 * One for the corrupted freechain, one for the wrong count of
+	 * objects in use, and a third reported while fixing the broken cache.
+ */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+ /*
+	 * Try to repair the corrupted freepointer.
+	 * Still expecting two errors: the first for the wrong count of
+	 * objects in use, the second reported while fixing the broken cache.
+ */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+ u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+
+ kasan_disable_current();
+
+ /* Suppress the -Warray-bounds warning */
+ OPTIMIZER_HIDE_VAR(p);
+ p[18] = 0xab;
+ p[19] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
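
The suite's init hook publishes slab_errors as a KUnit named resource, and the tests expect validate_slab_cache() to bump that counter via the SLUB side rather than relying on console output. Roughly, the counterpart hook in mm/slub.c is expected to look like the sketch below; it is reconstructed for illustration (and would rely on mm/slub.c's existing includes), not quoted from this diff:

/* Rough sketch of the mm/slub.c hook that feeds 'slab_errors' above. */
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	/* Only count errors while a KUnit test is running on this task. */
	if (!current->kunit_test)
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	/* Bump the counter registered by test_init() in the suite above. */
	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
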
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c2..a2bb7738c373 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -8,7 +8,7 @@
#include <linux/kprobes.h>
#include <linux/sched.h>
-notrace static nokprobe_inline
+noinstr static
unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -19,12 +19,13 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
if (irqs_disabled())
goto out;
- /*
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+ if (is_percpu_thread())
+ goto out;
+
+#ifdef CONFIG_SMP
+ if (current->migration_disabled)
goto out;
+#endif
/*
* It is valid to assume CPU-locality during early bootup:
@@ -37,6 +38,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
*/
preempt_disable_notrace();
+ instrumentation_begin();
if (!printk_ratelimit())
goto out_enable;
@@ -47,21 +49,20 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
dump_stack();
out_enable:
+ instrumentation_end();
preempt_enable_no_resched_notrace();
out:
return this_cpu;
}
-notrace unsigned int debug_smp_processor_id(void)
+noinstr unsigned int debug_smp_processor_id(void)
{
return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
-NOKPROBE_SYMBOL(debug_smp_processor_id);
-notrace void __this_cpu_preempt_check(const char *op)
+noinstr void __this_cpu_preempt_check(const char *op)
{
check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
-NOKPROBE_SYMBOL(__this_cpu_preempt_check);
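
These checks only run when CONFIG_DEBUG_PREEMPT routes smp_processor_id() through debug_smp_processor_id(). The new migration_disabled test means that a migrate-disabled region now passes even with preemption enabled; a small illustrative caller (the function name is invented):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Illustrative only: legal per the new check, since a migrate-disabled
 * task cannot be moved to another CPU even though it may be preempted. */
static void example_report_cpu(void)
{
	unsigned int cpu;

	migrate_disable();
	cpu = smp_processor_id();
	pr_info("running on CPU %u\n", cpu);
	migrate_enable();
}
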
diff --git a/lib/sort.c b/lib/sort.c
index cf408aec3733..b399bf10d675 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -51,7 +51,7 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
* which basically all CPUs have, to minimize loop overhead computations.
*
* For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
- * bottom of the loop, even though the zero flag is stil valid from the
+ * bottom of the loop, even though the zero flag is still valid from the
* subtract (since the intervening mov instructions don't alter the flags).
* Gcc 8.1.0 doesn't have that problem.
*/
@@ -117,23 +117,32 @@ static void swap_bytes(void *a, void *b, size_t n)
} while (n);
}
-typedef void (*swap_func_t)(void *a, void *b, int size);
-
/*
* The values are arbitrary as long as they can't be confused with
* a pointer, but small integers make for the smallest compare
* instructions.
*/
-#define SWAP_WORDS_64 (swap_func_t)0
-#define SWAP_WORDS_32 (swap_func_t)1
-#define SWAP_BYTES (swap_func_t)2
+#define SWAP_WORDS_64 (swap_r_func_t)0
+#define SWAP_WORDS_32 (swap_r_func_t)1
+#define SWAP_BYTES (swap_r_func_t)2
+#define SWAP_WRAPPER (swap_r_func_t)3
+
+struct wrapper {
+ cmp_func_t cmp;
+ swap_func_t swap;
+};
/*
* The function pointer is last to make tail calls most efficient if the
* compiler decides not to inline this function.
*/
-static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
+static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
+ if (swap_func == SWAP_WRAPPER) {
+ ((const struct wrapper *)priv)->swap(a, b, (int)size);
+ return;
+ }
+
if (swap_func == SWAP_WORDS_64)
swap_words_64(a, b, size);
else if (swap_func == SWAP_WORDS_32)
@@ -141,7 +150,16 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
else if (swap_func == SWAP_BYTES)
swap_bytes(a, b, size);
else
- swap_func(a, b, (int)size);
+ swap_func(a, b, (int)size, priv);
+}
+
+#define _CMP_WRAPPER ((cmp_r_func_t)0L)
+
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
+{
+ if (cmp == _CMP_WRAPPER)
+ return ((const struct wrapper *)priv)->cmp(a, b);
+ return cmp(a, b, priv);
}
/**
@@ -171,12 +189,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
}
/**
- * sort - sort an array of elements
+ * sort_r - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
* @cmp_func: pointer to comparison function
* @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
*
* This function does a heapsort on the given array. You may provide
* a swap_func function if you need to do something more than a memory
@@ -188,9 +207,10 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* O(n*n) worst-case behavior and extra memory requirements that make
* it less suitable for kernel use.
*/
-void sort(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *),
- void (*swap_func)(void *, void *, int size))
+void sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
@@ -199,6 +219,10 @@ void sort(void *base, size_t num, size_t size,
if (!a) /* num < 2 || size == 0 */
return;
+ /* called from 'sort' without swap function, let's pick the default */
+ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
+ swap_func = NULL;
+
if (!swap_func) {
if (is_aligned(base, size, 8))
swap_func = SWAP_WORDS_64;
@@ -221,7 +245,7 @@ void sort(void *base, size_t num, size_t size,
if (a) /* Building heap: sift down --a */
a -= size;
else if (n -= size) /* Sorting: Extract root to --n */
- do_swap(base, base + n, size, swap_func);
+ do_swap(base, base + n, size, swap_func, priv);
else /* Sort complete */
break;
@@ -238,18 +262,31 @@ void sort(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = cmp_func(base + c, base + d) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
/* Now backtrack from "b" to the correct location for "a" */
- while (b != a && cmp_func(base + a, base + b) >= 0)
+ while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
b = parent(b, lsbit, size);
c = b; /* Where "a" belongs */
while (b != a) { /* Shift it into place */
b = parent(b, lsbit, size);
- do_swap(base + b, base + c, size, swap_func);
+ do_swap(base + b, base + c, size, swap_func, priv);
}
}
}
+EXPORT_SYMBOL(sort_r);
+
+void sort(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+}
EXPORT_SYMBOL(sort);
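
After this change, sort() is a thin wrapper that packs its two-argument callbacks into struct wrapper and dispatches through sort_r() via the _CMP_WRAPPER/SWAP_WRAPPER sentinels, while callers that need per-call context can use sort_r() directly. A minimal usage sketch; the element type, comparison, and helper names are invented for illustration:

#include <linux/sort.h>
#include <linux/types.h>

/* Illustrative only. */
struct example_pair {
	u32 key;
	u32 val;
};

/* Order depends on the caller-supplied flag passed through @priv. */
static int example_cmp(const void *a, const void *b, const void *priv)
{
	const struct example_pair *pa = a, *pb = b;
	bool descending = *(const bool *)priv;
	int diff = pa->key < pb->key ? -1 : pa->key > pb->key;

	return descending ? -diff : diff;
}

static void example_sort(struct example_pair *p, size_t n, bool descending)
{
	/* NULL swap_func: sort_r() falls back to its built-in swaps. */
	sort_r(p, n, sizeof(*p), example_cmp, NULL, &descending);
}
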
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 66cab785bea0..4a7055a63d9f 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -1,162 +1,565 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic stack depot for storing stack traces.
+ * Stack depot - a stack trace storage that avoids duplication.
*
- * Some debugging tools need to save stack traces of certain events which can
- * be later presented to the user. For example, KASAN needs to safe alloc and
- * free stacks for each object, but storing two stack traces per object
- * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
- * that).
- *
- * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
- * and free stacks repeat a lot, we save about 100x space.
- * Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguos memory allocation.
+ * Internally, stack depot maintains a hash table of unique stacktraces. The
+ * stack traces themselves are stored contiguously one after another in a set
+ * of separate page allocations.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
+#define pr_fmt(fmt) "stackdepot: " fmt
+
+#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
+#include <linux/kmsan.h>
+#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/poison.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/kasan-enabled.h>
+
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define STACK_ALLOC_NULL_PROTECTION_BITS 1
-#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
-#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
-#define STACK_ALLOC_ALIGN 4
-#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
- STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
- STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 8192
-#define STACK_ALLOC_MAX_SLABS \
- (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
- (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
-
-/* The compact structure to store the reference to stacks. */
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOLS_CAP 8192
+#define DEPOT_MAX_POOLS \
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
+
+/* Compact structure that stores a reference to a stack. */
union handle_parts {
depot_stack_handle_t handle;
struct {
- u32 slabindex : STACK_ALLOC_INDEX_BITS;
- u32 offset : STACK_ALLOC_OFFSET_BITS;
- u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
+ u32 pool_index : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
struct stack_record {
- struct stack_record *next; /* Link in the hashtable */
- u32 hash; /* Hash in the hastable */
- u32 size; /* Number of frames in the stack */
- union handle_parts handle;
- unsigned long entries[1]; /* Variable-sized array of entries. */
+ struct list_head hash_list; /* Links in the hash table */
+ u32 hash; /* Hash in hash table */
+ u32 size; /* Number of stored frames */
+ union handle_parts handle; /* Constant after initialization */
+ refcount_t count;
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is to
+ * only place a stack record onto the freelist iff its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered as valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
};
-static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+static bool stack_depot_disabled;
+static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
+/* Use one hash table bucket per 16 KB of memory. */
+#define STACK_HASH_TABLE_SCALE 14
+/* Limit the number of buckets between 4K and 1M. */
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
+/* Initial seed for jhash2. */
+#define STACK_HASH_SEED 0x9747b28c
+
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
+/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
+static unsigned int stack_bucket_number_order;
+/* Hash mask for indexing the table. */
+static unsigned int stack_hash_mask;
-static int depot_index;
-static int next_slab_inited;
-static size_t depot_offset;
-static DEFINE_SPINLOCK(depot_lock);
+/* Array of memory regions that store stack records. */
+static void *stack_pools[DEPOT_MAX_POOLS];
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
-static bool init_stack_slab(void **prealloc)
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+ DEPOT_COUNTER_REFD_ALLOCS,
+ DEPOT_COUNTER_REFD_FREES,
+ DEPOT_COUNTER_REFD_INUSE,
+ DEPOT_COUNTER_FREELIST_SIZE,
+ DEPOT_COUNTER_PERSIST_COUNT,
+ DEPOT_COUNTER_PERSIST_BYTES,
+ DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+ [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations",
+ [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees",
+ [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use",
+ [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+ [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count",
+ [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
+
+static int __init disable_stack_depot(char *str)
{
- if (!*prealloc)
- return false;
+ return kstrtobool(str, &stack_depot_disabled);
+}
+early_param("stack_depot_disable", disable_stack_depot);
+
+void __init stack_depot_request_early_init(void)
+{
+ /* Too late to request early init now. */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_early_init_requested = true;
+}
+
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+ unsigned long i;
+
+ for (i = 0; i < entries; i++)
+ INIT_LIST_HEAD(&stack_table[i]);
+}
+
+/* Allocates a hash table via memblock. Can only be used during early boot. */
+int __init stack_depot_early_init(void)
+{
+ unsigned long entries = 0;
+
+ /* This function must be called only once, from mm_init(). */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+ __stack_depot_early_init_passed = true;
+
/*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_slab_inited| below and in depot_alloc_stack().
+ * Print disabled message even if early init has not been requested:
+ * stack_depot_init() will not print one.
*/
- if (smp_load_acquire(&next_slab_inited))
- return true;
- if (stack_slabs[depot_index] == NULL) {
- stack_slabs[depot_index] = *prealloc;
- } else {
- stack_slabs[depot_index + 1] = *prealloc;
+ if (stack_depot_disabled) {
+ pr_info("disabled\n");
+ return 0;
+ }
+
+ /*
+ * If KASAN is enabled, use the maximum order: KASAN is frequently used
+ * in fuzzing scenarios, which leads to a large number of different
+ * stack traces being stored in stack depot.
+ */
+ if (kasan_enabled() && !stack_bucket_number_order)
+ stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ /*
+ * Check if early init has been requested after setting
+ * stack_bucket_number_order: stack_depot_init() uses its value.
+ */
+ if (!__stack_depot_early_init_requested)
+ return 0;
+
+ /*
+ * If stack_bucket_number_order is not set, leave entries as 0 to rely
+ * on the automatic calculations performed by alloc_large_system_hash().
+ */
+ if (stack_bucket_number_order)
+ entries = 1UL << stack_bucket_number_order;
+ pr_info("allocating hash table via alloc_large_system_hash\n");
+ stack_table = alloc_large_system_hash("stackdepot",
+ sizeof(struct list_head),
+ entries,
+ STACK_HASH_TABLE_SCALE,
+ HASH_EARLY,
+ NULL,
+ &stack_hash_mask,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ return -ENOMEM;
+ }
+ if (!entries) {
/*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in depot_save_stack().
+ * Obtain the number of entries that was calculated by
+ * alloc_large_system_hash().
*/
- smp_store_release(&next_slab_inited, 1);
+ entries = stack_hash_mask + 1;
}
- *prealloc = NULL;
+ init_stack_table(entries);
+
+ return 0;
+}
+
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
+int stack_depot_init(void)
+{
+ static DEFINE_MUTEX(stack_depot_init_mutex);
+ unsigned long entries;
+ int ret = 0;
+
+ mutex_lock(&stack_depot_init_mutex);
+
+ if (stack_depot_disabled || stack_table)
+ goto out_unlock;
+
+ /*
+ * Similarly to stack_depot_early_init, use stack_bucket_number_order
+ * if assigned, and rely on automatic scaling otherwise.
+ */
+ if (stack_bucket_number_order) {
+ entries = 1UL << stack_bucket_number_order;
+ } else {
+ int scale = STACK_HASH_TABLE_SCALE;
+
+ entries = nr_free_buffer_pages();
+ entries = roundup_pow_of_two(entries);
+
+ if (scale > PAGE_SHIFT)
+ entries >>= (scale - PAGE_SHIFT);
+ else
+ entries <<= (PAGE_SHIFT - scale);
+ }
+
+ if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+ if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
+ stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ stack_hash_mask = entries - 1;
+ init_stack_table(entries);
+
+out_unlock:
+ mutex_unlock(&stack_depot_init_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stack_depot_init);
+
+/*
+ * Initializes new stack pool, and updates the list of pools.
+ */
+static bool depot_init_pool(void **prealloc)
+{
+ lockdep_assert_held(&pool_lock);
+
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
+
+ if (!new_pool && *prealloc) {
+ /* We have preallocated memory, use it. */
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+ }
+
+ if (!new_pool)
+ return false; /* new_pool and *prealloc are NULL */
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = new_pool;
+
+ /*
+ * Stack depot tries to keep an extra pool allocated even before it runs
+ * out of space in the currently used pool.
+ *
+	 * To indicate that a new preallocation is needed, new_pool is reset to
+ * NULL; do not reset to NULL if we have reached the maximum number of
+ * pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool, NULL);
+ else
+ WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
+
+ /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+ WRITE_ONCE(pools_num, pools_num + 1);
+ ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
return true;
}
-/* Allocation of a new stack in raw storage */
-static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
- u32 hash, void **prealloc, gfp_t alloc_flags)
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+ lockdep_assert_held(&pool_lock);
+
+ /*
+ * If a new pool is already saved or the maximum number of
+ * pools is reached, do not use the preallocated memory.
+ */
+ if (new_pool)
+ return;
+
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+}
+
+/*
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
+ */
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
- int required_size = offsetof(struct stack_record, entries) +
- sizeof(unsigned long) * size;
struct stack_record *stack;
+ void *current_pool;
+ u32 pool_index;
- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+ lockdep_assert_held(&pool_lock);
- if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
return NULL;
- }
- depot_index++;
- depot_offset = 0;
+ }
+
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
+ return NULL;
+
+ stack = current_pool + pool_offset;
+
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);
+
+ pool_offset += size;
+
+ return stack;
+}
+
+/* Try to find next free usable entry from the freelist. */
+static struct stack_record *depot_pop_free(void)
+{
+ struct stack_record *stack;
+
+ lockdep_assert_held(&pool_lock);
+
+ if (list_empty(&free_stacks))
+ return NULL;
+
+ /*
+ * We maintain the invariant that the elements in front are least
+ * recently used, and are therefore more likely to be associated with an
+ * RCU grace period in the past. Consequently it is sufficient to only
+ * check the first entry.
+ */
+ stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
+ return NULL;
+
+ list_del(&stack->free_list);
+ counters[DEPOT_COUNTER_FREELIST_SIZE]--;
+
+ return stack;
+}
+
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
+{
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
+
+ WARN_ON_ONCE(sizeof(s->entries) < used);
+
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
+}
+
+/* Allocates a new stack in a stack depot pool. */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
+{
+ struct stack_record *stack = NULL;
+ size_t record_size;
+
+ lockdep_assert_held(&pool_lock);
+
+ /* This should already be checked by public API entry points. */
+ if (WARN_ON_ONCE(!nr_entries))
+ return NULL;
+
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
/*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in depot_save_stack() and
- * init_stack_slab().
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
*/
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
- smp_store_release(&next_slab_inited, 0);
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
+ stack = depot_pop_free();
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
}
- init_stack_slab(prealloc);
- if (stack_slabs[depot_index] == NULL)
- return NULL;
- stack = stack_slabs[depot_index] + depot_offset;
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }
+ /* Save the stack trace. */
stack->hash = hash;
- stack->size = size;
- stack->handle.slabindex = depot_index;
- stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
- stack->handle.valid = 1;
- memcpy(stack->entries, entries, size * sizeof(unsigned long));
- depot_offset += required_size;
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ refcount_set(&stack->count, 1);
+ counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]++;
+ } else {
+ /* Warn on attempts to switch to refcounting this entry. */
+ refcount_set(&stack->count, REFCOUNT_SATURATED);
+ counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+ counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
+ }
+
+ /*
+ * Let KMSAN know the stored stack record is initialized. This shall
+ * prevent false positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(stack, record_size);
return stack;
}
-#define STACK_HASH_ORDER 20
-#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
-#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
-#define STACK_HASH_SEED 0x9747b28c
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+ const int pools_num_cached = READ_ONCE(pools_num);
+ union handle_parts parts = { .handle = handle };
+ void *pool;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
-static struct stack_record *stack_table[STACK_HASH_SIZE] = {
- [0 ... STACK_HASH_SIZE - 1] = NULL
-};
+ lockdep_assert_not_held(&pool_lock);
+
+ if (parts.pool_index > pools_num_cached) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pools_num_cached, handle);
+ return NULL;
+ }
+
+ pool = stack_pools[parts.pool_index];
+ if (WARN_ON(!pool))
+ return NULL;
+
+ stack = pool + offset;
+ if (WARN_ON(!refcount_read(&stack->count)))
+ return NULL;
+
+ return stack;
+}
+
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+ unsigned long flags;
+
+ lockdep_assert_not_held(&pool_lock);
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
-/* Calculate hash for a stack */
+ /*
+ * Remove the entry from the hash list. Concurrent list traversal may
+ * still observe the entry, but since the refcount is zero, this entry
+ * will no longer be considered as valid.
+ */
+ list_del_rcu(&stack->hash_list);
+
+ /*
+ * Due to being used from constrained contexts such as the allocators,
+ * NMI, or even RCU itself, stack depot cannot rely on primitives that
+ * would sleep (such as synchronize_rcu()) or recursively call into
+ * stack depot again (such as call_rcu()).
+ *
+ * Instead, get an RCU cookie, so that we can ensure this entry isn't
+ * moved onto another list until the next grace period, and concurrent
+ * RCU list traversal remains safe.
+ */
+ stack->rcu_state = get_state_synchronize_rcu();
+
+ /*
+ * Add the entry to the freelist tail, so that older entries are
+ * considered first - their RCU cookie is more likely to no longer be
+ * associated with the current grace period.
+ */
+ list_add_tail(&stack->free_list, &free_stacks);
+
+ counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ counters[DEPOT_COUNTER_REFD_FREES]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]--;
+
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
- size * sizeof(unsigned long) / sizeof(u32),
- STACK_HASH_SEED);
+ array_size(size, sizeof(*entries)) / sizeof(u32),
+ STACK_HASH_SEED);
}
-/* Use our own, non-instrumented version of memcmp().
- *
- * We actually don't care about the order, just the equality.
+/*
+ * Non-instrumented version of memcmp().
+ * Does not check the lexicographical order, only the equality.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
@@ -169,89 +572,99 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
return 0;
}
-/* Find a stack that is equal to the one stored in entries in the hash */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
- unsigned long *entries, int size,
- u32 hash)
+/* Finds a stack in a bucket of the hash table. */
+static inline struct stack_record *find_stack(struct list_head *bucket,
+ unsigned long *entries, int size,
+ u32 hash, depot_flags_t flags)
{
- struct stack_record *found;
+ struct stack_record *stack, *ret = NULL;
- for (found = bucket; found; found = found->next) {
- if (found->hash == hash &&
- found->size == size &&
- !stackdepot_memcmp(entries, found->entries, size))
- return found;
+ /*
+ * Stack depot may be used from instrumentation that instruments RCU or
+ * tracing itself; use variant that does not call into RCU and cannot be
+ * traced.
+ *
+ * Note: Such use cases must take care when using refcounting to evict
+ * unused entries, because the stack record free-then-reuse code paths
+ * do call into RCU.
+ */
+ rcu_read_lock_sched_notrace();
+
+ list_for_each_entry_rcu(stack, bucket, hash_list) {
+ if (stack->hash != hash || stack->size != size)
+ continue;
+
+ /*
+ * This may race with depot_free_stack() accessing the freelist
+ * management state unioned with @entries. The refcount is zero
+ * in that case and the below refcount_inc_not_zero() will fail.
+ */
+ if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+ continue;
+
+ /*
+ * Try to increment refcount. If this succeeds, the stack record
+ * is valid and has not yet been freed.
+ *
+ * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+ * to then call stack_depot_put() later, and we can assume that
+ * a stack record is never placed back on the freelist.
+ */
+ if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+ continue;
+
+ ret = stack;
+ break;
}
- return NULL;
-}
-/**
- * stack_depot_fetch - Fetch stack entries from a depot
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
- *
- * Return: The number of trace entries for this depot.
- */
-unsigned int stack_depot_fetch(depot_stack_handle_t handle,
- unsigned long **entries)
-{
- union handle_parts parts = { .handle = handle };
- void *slab = stack_slabs[parts.slabindex];
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack = slab + offset;
+ rcu_read_unlock_sched_notrace();
- *entries = stack->entries;
- return stack->size;
+ return ret;
}
-EXPORT_SYMBOL_GPL(stack_depot_fetch);
-/**
- * stack_depot_save - Save a stack trace from an array
- *
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
- *
- * Return: The handle of the stack struct stored in depot
- */
-depot_stack_handle_t stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags)
{
- struct stack_record *found = NULL, **bucket;
- depot_stack_handle_t retval = 0;
+ struct list_head *bucket;
+ struct stack_record *found = NULL;
+ depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
unsigned long flags;
u32 hash;
- if (unlikely(nr_entries == 0))
- goto fast_exit;
-
- hash = hash_stack(entries, nr_entries);
- bucket = &stack_table[hash & STACK_HASH_MASK];
+ if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+ return 0;
/*
- * Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
+ * If this stack trace is from an interrupt, including anything before
+ * interrupt entry usually leads to unbounded stack depot growth.
+ *
+ * Since use of filter_irq_stacks() is a requirement to ensure stack
+	 * depot can efficiently deduplicate interrupt stacks, always call
+	 * filter_irq_stacks() here to simplify all callers' use of stack depot.
*/
- found = find_stack(smp_load_acquire(bucket), entries,
- nr_entries, hash);
+ nr_entries = filter_irq_stacks(entries, nr_entries);
+
+ if (unlikely(nr_entries == 0) || stack_depot_disabled)
+ return 0;
+
+ hash = hash_stack(entries, nr_entries);
+ bucket = &stack_table[hash & stack_hash_mask];
+
+ /* Fast path: look the stack trace up without locking. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (found)
goto exit;
/*
- * Check if the current or the next stack slab need to be initialized.
- * If so, allocate the memory - we won't be able to do that under the
- * lock.
- *
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+ * Allocate memory for a new pool if required now:
+ * we won't be able to do that under the lock.
*/
- if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+ if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -260,44 +673,177 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
- spin_lock_irqsave(&depot_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
- found = find_stack(*bucket, entries, nr_entries, hash);
+ /* Try to find again, to avoid concurrently inserting duplicates. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(entries, nr_entries,
- hash, &prealloc, alloc_flags);
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
+
if (new) {
- new->next = *bucket;
/*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
+ * This releases the stack record into the bucket and
+ * makes it visible to readers in find_stack().
*/
- smp_store_release(bucket, new);
+ list_add_rcu(&new->hash_list, bucket);
found = new;
}
- } else if (prealloc) {
+ }
+
+ if (prealloc) {
/*
- * We didn't need to store this stack trace, but let's keep
- * the preallocated memory for the future.
+ * Either stack depot already contains this stack trace, or
+ * depot_alloc_stack() did not consume the preallocated memory.
+		 * Try to keep the preallocated memory for future use.
*/
- WARN_ON(!init_stack_slab(&prealloc));
+ depot_keep_new_pool(&prealloc);
}
- spin_unlock_irqrestore(&depot_lock, flags);
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
- /* Nobody used this memory, ok to free it. */
- free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ /* Stack depot didn't use this memory, free it. */
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
- retval = found->handle.handle;
-fast_exit:
- return retval;
+ handle = found->handle.handle;
+ return handle;
+}
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
+
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags)
+{
+ return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
+{
+ struct stack_record *stack;
+
+ *entries = NULL;
+ /*
+ * Let KMSAN know *entries is initialized. This shall prevent false
+ * positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(entries, sizeof(*entries));
+
+ if (!handle || stack_depot_disabled)
+ return 0;
+
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should never be NULL, otherwise this is a use-after-put (or just a
+ * corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
+ return 0;
+
+ *entries = stack->entries;
+ return stack->size;
+}
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
+
+void stack_depot_put(depot_stack_handle_t handle)
+{
+ struct stack_record *stack;
+
+ if (!handle || stack_depot_disabled)
+ return;
+
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should always be able to find the stack record, otherwise this is an
+ * unbalanced put attempt (or corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+ return;
+
+ if (refcount_dec_and_test(&stack->count))
+ depot_free_stack(stack);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
+
+void stack_depot_print(depot_stack_handle_t stack)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(stack, &entries);
+ if (nr_entries > 0)
+ stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(handle, &entries);
+ return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+ spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits)
+{
+ union handle_parts parts = { .handle = handle };
+
+ /* Don't set extra bits on empty handles. */
+ if (!handle)
+ return 0;
+
+ parts.extra = extra_bits;
+ return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+ /*
+ * data race ok: These are just statistics counters, and approximate
+ * statistics are ok for debugging.
+ */
+ seq_printf(seq, "pools: %d\n", data_race(pools_num));
+ for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+ seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ if (stack_depot_disabled)
+ return 0;
+
+ dir = debugfs_create_dir("stackdepot", NULL);
+ debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+ return 0;
+}
+late_initcall(depot_debugfs_init);
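
Taken together, the rework distinguishes persistent records saved via stack_depot_save() from refcounted, evictable records saved with STACK_DEPOT_FLAG_GET through stack_depot_save_flags() and later released with stack_depot_put(). A minimal caller sketch follows; the helper names, 16-entry buffer, and gfp choice are illustrative, and callers are assumed to have arranged stack_depot_init() or the early init shown above:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Illustrative only: save the current stack as an evictable record. */
static depot_stack_handle_t example_record_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void example_release_stack(depot_stack_handle_t handle)
{
	/* Balances the reference taken via STACK_DEPOT_FLAG_GET; the record
	 * goes back on the freelist once its refcount drops to zero. */
	stack_depot_put(handle);
}
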
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
new file mode 100644
index 000000000000..05947a2feb93
--- /dev/null
+++ b/lib/stackinit_kunit.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test cases for compiler-based stack variable zeroing via
+ * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * For example, see:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
+ * --make_option LLVM=1 \
+ * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+/* Exfiltration buffer. */
+#define MAX_VAR_SIZE 128
+static u8 check_buf[MAX_VAR_SIZE];
+
+/* Character array to trigger stack protector in all functions. */
+#define VAR_BUFFER 32
+
+/* Volatile mask to convince compiler to copy memory with 0xff. */
+static volatile u8 forced_mask = 0xff;
+
+/* Location and size tracking to validate fill and test are colocated. */
+static void *fill_start, *target_start;
+static size_t fill_size, target_size;
+
+static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
+ char *needle_start, size_t needle_size)
+{
+ if (needle_start >= haystack_start &&
+ needle_start + needle_size <= haystack_start + haystack_size)
+ return true;
+ return false;
+}
+
+/* Whether the test is expected to fail. */
+#define WANT_SUCCESS 0
+#define XFAIL 1
+
+#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
+#define DO_NOTHING_TYPE_STRING(var_type) void
+#define DO_NOTHING_TYPE_STRUCT(var_type) void
+
+#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
+#define DO_NOTHING_RETURN_STRING(ptr) /**/
+#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+
+#define DO_NOTHING_CALL_SCALAR(var, name) \
+ (var) = do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_STRING(var, name) \
+ do_nothing_ ## name(var)
+#define DO_NOTHING_CALL_STRUCT(var, name) \
+ do_nothing_ ## name(&(var))
+
+#define FETCH_ARG_SCALAR(var) &var
+#define FETCH_ARG_STRING(var) var
+#define FETCH_ARG_STRUCT(var) &var
+
+#define FILL_SIZE_STRING 16
+
+#define INIT_CLONE_SCALAR /**/
+#define INIT_CLONE_STRING [FILL_SIZE_STRING]
+#define INIT_CLONE_STRUCT /**/
+
+#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
+#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
+/*
+ * For the struct, intentionally poison padding to see if it gets
+ * copied out in direct assignments.
+ */
+#define ZERO_CLONE_STRUCT(zero) \
+ do { \
+ memset(&(zero), 0xFF, sizeof(zero)); \
+ zero.one = 0; \
+ zero.two = 0; \
+ zero.three = 0; \
+ zero.four = 0; \
+ } while (0)
+
+#define INIT_SCALAR_none(var_type) /**/
+#define INIT_SCALAR_zero(var_type) = 0
+
+#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
+
+#define INIT_STRUCT_none(var_type) /**/
+#define INIT_STRUCT_zero(var_type) = { }
+
+
+#define __static_partial { .two = 0, }
+#define __static_all { .one = 0, \
+ .two = 0, \
+ .three = 0, \
+ .four = 0, \
+ }
+#define __dynamic_partial { .two = arg->two, }
+#define __dynamic_all { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
+ }
+#define __runtime_partial var.two = 0
+#define __runtime_all var.one = 0; \
+ var.two = 0; \
+ var.three = 0; \
+ var.four = 0
+
+#define INIT_STRUCT_static_partial(var_type) \
+ = __static_partial
+#define INIT_STRUCT_static_all(var_type) \
+ = __static_all
+#define INIT_STRUCT_dynamic_partial(var_type) \
+ = __dynamic_partial
+#define INIT_STRUCT_dynamic_all(var_type) \
+ = __dynamic_all
+#define INIT_STRUCT_runtime_partial(var_type) \
+ ; __runtime_partial
+#define INIT_STRUCT_runtime_all(var_type) \
+ ; __runtime_all
+
+#define INIT_STRUCT_assigned_static_partial(var_type) \
+ ; var = (var_type)__static_partial
+#define INIT_STRUCT_assigned_static_all(var_type) \
+ ; var = (var_type)__static_all
+#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
+ ; var = (var_type)__dynamic_partial
+#define INIT_STRUCT_assigned_dynamic_all(var_type) \
+ ; var = (var_type)__dynamic_all
+
+#define INIT_STRUCT_assigned_copy(var_type) \
+ ; var = *(arg)
+
+/*
+ * @name: unique string name for the test
+ * @var_type: type to be tested for zeroing initialization
+ * @which: is this a SCALAR, STRING, or STRUCT type?
+ * @init_level: what kind of initialization is performed
+ * @xfail: is this test expected to fail?
+ */
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
+/* Reports failures via KUnit assertions rather than a return value. */	\
+static noinline void test_ ## name (struct kunit *test) \
+{ \
+ var_type zero INIT_CLONE_ ## which; \
+ int ignored; \
+ u8 sum = 0, i; \
+ \
+ /* Notice when a new test is larger than expected. */ \
+ BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
+ \
+ /* Fill clone type with zero for per-field init. */ \
+ ZERO_CLONE_ ## which(zero); \
+ /* Clear entire check buffer for 0xFF overlap test. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Fill stack with 0xFF. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 1, \
+ FETCH_ARG_ ## which(zero)); \
+ /* Verify all bytes overwritten with 0xFF. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] != 0xFF); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
+ /* Clear entire check buffer for later bit tests. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Extract stack-defined variable contents. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 0, \
+ FETCH_ARG_ ## which(zero)); \
+ \
+ /* Validate that compiler lined up fill and target. */ \
+ KUNIT_ASSERT_TRUE_MSG(test, \
+ stackinit_range_contains(fill_start, fill_size, \
+ target_start, target_size), \
+ "stack fill missed target!? " \
+ "(fill %zu wide, target offset by %d)\n", \
+ fill_size, \
+ (int)((ssize_t)(uintptr_t)fill_start - \
+ (ssize_t)(uintptr_t)target_start)); \
+ \
+ /* Look for any bytes still 0xFF in check region. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] == 0xFF); \
+ \
+ if (sum != 0 && xfail) \
+ kunit_skip(test, \
+ "XFAIL uninit bytes: %d\n", \
+ sum); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "uninit bytes: %d\n", sum); \
+}
+#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
+/* no-op to force compiler into ignoring "uninitialized" vars */\
+static noinline DO_NOTHING_TYPE_ ## which(var_type) \
+do_nothing_ ## name(var_type *ptr) \
+{ \
+ /* Will always be true, but compiler doesn't know. */ \
+ if ((unsigned long)ptr > 0x2) \
+ return DO_NOTHING_RETURN_ ## which(ptr); \
+ else \
+ return DO_NOTHING_RETURN_ ## which(ptr + 1); \
+} \
+static noinline int leaf_ ## name(unsigned long sp, bool fill, \
+ var_type *arg) \
+{ \
+ char buf[VAR_BUFFER]; \
+ var_type var \
+ INIT_ ## which ## _ ## init_level(var_type); \
+ \
+ target_start = &var; \
+ target_size = sizeof(var); \
+ /* \
+ * Keep this buffer around to make sure we've got a \
+ * stack frame of SOME kind... \
+ */ \
+ memset(buf, (char)(sp & 0xff), sizeof(buf)); \
+ /* Fill variable with 0xFF. */ \
+ if (fill) { \
+ fill_start = &var; \
+ fill_size = sizeof(var); \
+ memset(fill_start, \
+ (char)((sp & 0xff) | forced_mask), \
+ fill_size); \
+ } \
+ \
+ /* Silence "never initialized" warnings. */ \
+ DO_NOTHING_CALL_ ## which(var, name); \
+ \
+ /* Exfiltrate "var". */ \
+ memcpy(check_buf, target_start, target_size); \
+ \
+ return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
+} \
+DEFINE_TEST_DRIVER(name, var_type, which, xfail)
+
+/* Structure with no padding. */
+struct test_packed {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Simple structure with padding likely to be covered by compiler. */
+struct test_small_hole {
+ size_t one;
+ char two;
+ /* 3 byte padding hole here. */
+ int three;
+ unsigned long four;
+};
+
+/* Trigger unhandled padding in a structure. */
+struct test_big_hole {
+ u8 one;
+ u8 two;
+ u8 three;
+ /* 61 byte padding hole here. */
+ u8 four __aligned(64);
+} __aligned(64);
+
+struct test_trailing_hole {
+ char *one;
+ char *two;
+ char *three;
+ char four;
+ /* "sizeof(unsigned long) - 1" byte padding hole here. */
+};
+
+/* Test if STRUCTLEAK is clearing structs with __user fields. */
+struct test_user {
+ u8 one;
+ unsigned long two;
+ char __user *three;
+ unsigned long four;
+};
+
+#define ALWAYS_PASS WANT_SUCCESS
+#define ALWAYS_FAIL XFAIL
+
+#ifdef CONFIG_INIT_STACK_NONE
+# define USER_PASS XFAIL
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS XFAIL
+#else
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS WANT_SUCCESS
+#endif
+
+#define DEFINE_SCALAR_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, \
+ init, xfail)
+
+#define DEFINE_SCALAR_TESTS(init, xfail) \
+ DEFINE_SCALAR_TEST(u8, init, xfail); \
+ DEFINE_SCALAR_TEST(u16, init, xfail); \
+ DEFINE_SCALAR_TEST(u32, init, xfail); \
+ DEFINE_SCALAR_TEST(u64, init, xfail); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, \
+ STRING, init, xfail)
+
+#define DEFINE_STRUCT_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ struct test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_STRUCT_TESTS(init, xfail) \
+ DEFINE_STRUCT_TEST(small_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(big_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(packed, init, xfail)
+
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_STRUCT_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+
+/* These should be fully initialized all the time! */
+DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+/* Struct initializers: padding may be left uninitialized. */
+DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+/* No initialization without compiler instrumentation. */
+DEFINE_SCALAR_TESTS(none, STRONG_PASS);
+DEFINE_STRUCT_TESTS(none, BYREF_PASS);
+/* Initialization of members with __user attribute. */
+DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
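/*
 * A worked expansion sketch of the macro layering above, traced through one
 * of the invocations: DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS) calls
 * DEFINE_STRUCT_TEST(small_hole, zero, ALWAYS_PASS), which becomes
 * DEFINE_TEST(small_hole_zero, struct test_small_hole, STRUCT, zero,
 * ALWAYS_PASS) and emits do_nothing_small_hole_zero(),
 * leaf_small_hole_zero(), and the KUnit case test_small_hole_zero().
 * Inside the leaf function the variable under test is declared as
 *
 *	struct test_small_hole var = { };
 *
 * since INIT_STRUCT_zero() expands to "= { }".
 */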
+
+/*
+ * Check two uses through a variable declaration outside either path,
+ * which was noticed as a special case in porting earlier stack init
+ * compiler logic.
+ */
+static int noinline __leaf_switch_none(int path, bool fill)
+{
+ switch (path) {
+ /*
+ * This is intentionally unreachable. To silence the
+ * warning, build with -Wno-switch-unreachable
+ */
+ uint64_t var[10];
+
+ case 1:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0x55, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ case 2:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0xaa, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ default:
+ var[1] = 5;
+ return var[1] & forced_mask;
+ }
+ return 0;
+}
+
+static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(1, fill);
+}
+
+static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(2, fill);
+}
+
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang have a way to perform initialization of variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://bugs.llvm.org/show_bug.cgi?id=44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
+
+#define KUNIT_test_scalars(init) \
+ KUNIT_CASE(test_u8_ ## init), \
+ KUNIT_CASE(test_u16_ ## init), \
+ KUNIT_CASE(test_u32_ ## init), \
+ KUNIT_CASE(test_u64_ ## init), \
+ KUNIT_CASE(test_char_array_ ## init)
+
+#define KUNIT_test_structs(init) \
+ KUNIT_CASE(test_small_hole_ ## init), \
+ KUNIT_CASE(test_big_hole_ ## init), \
+ KUNIT_CASE(test_trailing_hole_ ## init),\
+ KUNIT_CASE(test_packed_ ## init) \
+
+static struct kunit_case stackinit_test_cases[] = {
+ /* These are explicitly initialized and should always pass. */
+ KUNIT_test_scalars(zero),
+ KUNIT_test_structs(zero),
+ /* Padding here appears to be accidentally always initialized? */
+ KUNIT_test_structs(dynamic_partial),
+ KUNIT_test_structs(assigned_dynamic_partial),
+ /* Padding initialization depends on compiler behaviors. */
+ KUNIT_test_structs(static_partial),
+ KUNIT_test_structs(static_all),
+ KUNIT_test_structs(dynamic_all),
+ KUNIT_test_structs(runtime_partial),
+ KUNIT_test_structs(runtime_all),
+ KUNIT_test_structs(assigned_static_partial),
+ KUNIT_test_structs(assigned_static_all),
+ KUNIT_test_structs(assigned_dynamic_all),
+ /* Everything fails this since it effectively performs a memcpy(). */
+ KUNIT_test_structs(assigned_copy),
+ /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
+ KUNIT_test_scalars(none),
+ KUNIT_CASE(test_switch_1_none),
+ KUNIT_CASE(test_switch_2_none),
+ /* STRUCTLEAK_BYREF should cover from here down. */
+ KUNIT_test_structs(none),
+ /* STRUCTLEAK will only cover this. */
+ KUNIT_CASE(test_user),
+ {}
+};
+
+static struct kunit_suite stackinit_test_suite = {
+ .name = "stackinit",
+ .test_cases = stackinit_test_cases,
+};
+
+kunit_test_suites(&stackinit_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
new file mode 100644
index 000000000000..e21be95514af
--- /dev/null
+++ b/lib/strcat_kunit.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel module for testing 'strcat' family of functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+static volatile int unconst;
+
+static void strcat_test(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void strncat_test(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void strlcat_test(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* Only 1 of 3 characters copied, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static struct kunit_case strcat_test_cases[] = {
+ KUNIT_CASE(strcat_test),
+ KUNIT_CASE(strncat_test),
+ KUNIT_CASE(strlcat_test),
+ {}
+};
+
+static struct kunit_suite strcat_test_suite = {
+ .name = "strcat",
+ .test_cases = strcat_test_cases,
+};
+
+kunit_test_suite(strcat_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/string.c b/lib/string.c
index 6016eb3ac73d..6891d15ce991 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -6,20 +6,15 @@
*/
/*
- * stupid library routines.. The optimized versions should generally be found
- * as inline code in <asm-xx/string.h>
+ * This file should be used only for "library" routines that may have
+ * alternative implementations on specific architectures (generally
+ * found in <asm-xx/string.h>), or get overloaded by FORTIFY_SOURCE.
+ * (Specifically, this file is built with __NO_FORTIFY.)
*
- * These are buggy as well..
- *
- * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
- * - Added strsep() which will replace strtok() soon (because strsep() is
- * reentrant and should be faster). Use only strsep() in new code, please.
- *
- * * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>,
- * Matthew Hawkins <matt@mh.dropbear.id.au>
- * - Kissed strtok() goodbye
+ * Other helper functions should live in string_helpers.c.
*/
+#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
@@ -29,6 +24,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
#include <asm/page.h>
@@ -80,12 +76,6 @@ EXPORT_SYMBOL(strcasecmp);
#endif
#ifndef __HAVE_ARCH_STRCPY
-/**
- * strcpy - Copy a %NUL terminated string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- */
-#undef strcpy
char *strcpy(char *dest, const char *src)
{
char *tmp = dest;
@@ -98,19 +88,6 @@ EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
-/**
- * strncpy - Copy a length-limited, C-string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: The maximum number of bytes to copy
- *
- * The result is not %NUL-terminated if the source exceeds
- * @count bytes.
- *
- * In the case where the length of @src is less than that of
- * count, the remainder of @dest will be padded with %NUL.
- *
- */
char *strncpy(char *dest, const char *src, size_t count)
{
char *tmp = dest;
@@ -126,63 +103,14 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-/**
- * strlcpy - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Compatible with ``*BSD``: the result is always a valid
- * NUL-terminated string that fits in the buffer (unless,
- * of course, the buffer size is zero). It does not pad
- * out the result like strncpy() does.
- */
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-EXPORT_SYMBOL(strlcpy);
-#endif
-
#ifndef __HAVE_ARCH_STRSCPY
-/**
- * strscpy - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always NUL terminated, unless it's zero-sized.
- *
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the src string beyond the specified "count" bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
- * Preferred to strncpy() since it always returns a valid string, and
- * doesn't unnecessarily force the tail of the destination buffer to be
- * zeroed. If zeroing is desired please use strscpy_pad().
- *
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
- */
ssize_t strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
size_t max = count;
long res = 0;
- if (count == 0)
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -201,6 +129,14 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
max = 0;
#endif
+ /*
+ * read_word_at_a_time() below may read uninitialized bytes after the
+ * trailing zero and use them in comparisons. Disable this optimization
+ * under KMSAN to prevent false positive reports.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ max = 0;
+
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
@@ -238,45 +174,30 @@ EXPORT_SYMBOL(strscpy);
#endif
/**
- * strscpy_pad() - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always %NUL terminated, unless it's zero-sized.
- *
- * If the source string is shorter than the destination buffer, zeros
- * the tail of the destination buffer.
- *
- * For full explanation of why you may want to consider using the
- * 'strscpy' functions please see the function docstring for strscpy().
+ * stpcpy - copy a string from src to dest returning a pointer to the new end
+ * of dest, including src's %NUL-terminator. May overrun dest.
+ * @dest: pointer to end of string being copied into. Must be large enough
+ * to receive copy.
+ * @src: pointer to the beginning of string being copied from. Must not overlap
+ * dest.
*
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * stpcpy differs from strcpy in a key way: the return value is a pointer
+ * to the new %NUL-terminating character in @dest. (For strcpy, the return
+ * value is a pointer to the start of @dest). This interface is considered
+ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
+ * not recommended for usage. Instead, its definition is provided in case
+ * the compiler lowers other libcalls to stpcpy.
*/
-ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
{
- ssize_t written;
-
- written = strscpy(dest, src, count);
- if (written < 0 || written == count - 1)
- return written;
-
- memset(dest + written + 1, 0, count - written - 1);
-
- return written;
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return --dest;
}
-EXPORT_SYMBOL(strscpy_pad);
+EXPORT_SYMBOL(stpcpy);
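/*
 * A minimal usage sketch of the return-value convention documented above;
 * the 32-byte buffer and the strings are hypothetical, and sizing the
 * destination remains the caller's responsibility.
 */
	char buf[32];
	char *p = buf;

	p = stpcpy(p, "hello ");	/* p now points at buf[6], the NUL */
	p = stpcpy(p, "world");		/* buf is "hello world", p at buf[11] */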
#ifndef __HAVE_ARCH_STRCAT
-/**
- * strcat - Append one %NUL-terminated string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- */
-#undef strcat
char *strcat(char *dest, const char *src)
{
char *tmp = dest;
@@ -291,15 +212,6 @@ EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
-/**
- * strncat - Append a length-limited, C-string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- * @count: The maximum numbers of bytes to copy
- *
- * Note that in contrast to strncpy(), strncat() ensures the result is
- * terminated.
- */
char *strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
@@ -320,12 +232,6 @@ EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
-/**
- * strlcat - Append a length-limited, C-string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- * @count: The size of the destination buffer.
- */
size_t strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
@@ -339,7 +245,7 @@ size_t strlcat(char *dest, const char *src, size_t count)
count -= dsize;
if (len >= count)
len = count-1;
- memcpy(dest, src, len);
+ __builtin_memcpy(dest, src, len);
dest[len] = 0;
return res;
}
@@ -352,7 +258,6 @@ EXPORT_SYMBOL(strlcat);
* @cs: One string
* @ct: Another string
*/
-#undef strcmp
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
@@ -400,6 +305,9 @@ EXPORT_SYMBOL(strncmp);
* strchr - Find the first occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strchr(const char *s, int c)
{
@@ -429,6 +337,23 @@ char *strchrnul(const char *s, int c)
EXPORT_SYMBOL(strchrnul);
#endif
+/**
+ * strnchrnul - Find and return a character in a length limited string,
+ * or end of string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ *
+ * Returns pointer to the first occurrence of 'c' in s. If c is not found,
+ * then return a pointer to the last character of the string.
+ */
+char *strnchrnul(const char *s, size_t count, int c)
+{
+ while (count-- && *s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+
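/*
 * A minimal usage sketch, assuming a hypothetical "key=value" buffer: unlike
 * strnchr(), the result is never NULL and so is always safe to dereference.
 */
	const char *line = "key=value";
	const char *eq = strnchrnul(line, strlen(line), '=');

	/* eq points at the '='; if '=' were absent, eq would still point
	 * into line (at the spot where the search stopped), never NULL. */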
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
@@ -453,62 +378,24 @@ EXPORT_SYMBOL(strrchr);
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strnchr(const char *s, size_t count, int c)
{
- for (; count-- && *s != '\0'; ++s)
+ while (count--) {
if (*s == (char)c)
return (char *)s;
+ if (*s++ == '\0')
+ break;
+ }
return NULL;
}
EXPORT_SYMBOL(strnchr);
#endif
-/**
- * skip_spaces - Removes leading whitespace from @str.
- * @str: The string to be stripped.
- *
- * Returns a pointer to the first non-whitespace character in @str.
- */
-char *skip_spaces(const char *str)
-{
- while (isspace(*str))
- ++str;
- return (char *)str;
-}
-EXPORT_SYMBOL(skip_spaces);
-
-/**
- * strim - Removes leading and trailing whitespace from @s.
- * @s: The string to be stripped.
- *
- * Note that the first trailing whitespace is replaced with a %NUL-terminator
- * in the given string @s. Returns a pointer to the first non-whitespace
- * character in @s.
- */
-char *strim(char *s)
-{
- size_t size;
- char *end;
-
- size = strlen(s);
- if (!size)
- return s;
-
- end = s + size - 1;
- while (end >= s && isspace(*end))
- end--;
- *(end + 1) = '\0';
-
- return skip_spaces(s);
-}
-EXPORT_SYMBOL(strim);
-
#ifndef __HAVE_ARCH_STRLEN
-/**
- * strlen - Find the length of a string
- * @s: The string to be sized
- */
size_t strlen(const char *s)
{
const char *sc;
@@ -521,11 +408,6 @@ EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
-/**
- * strnlen - Find the length of a length-limited string
- * @s: The string to be sized
- * @count: The maximum number of bytes to search
- */
size_t strnlen(const char *s, size_t count)
{
const char *sc;
@@ -546,21 +428,13 @@ EXPORT_SYMBOL(strnlen);
size_t strspn(const char *s, const char *accept)
{
const char *p;
- const char *a;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (a = accept; *a != '\0'; ++a) {
- if (*p == *a)
- break;
- }
- if (*a == '\0')
- return count;
- ++count;
+ if (!strchr(accept, *p))
+ break;
}
- return count;
+ return p - s;
}
-
EXPORT_SYMBOL(strspn);
#endif
@@ -573,17 +447,12 @@ EXPORT_SYMBOL(strspn);
size_t strcspn(const char *s, const char *reject)
{
const char *p;
- const char *r;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (r = reject; *r != '\0'; ++r) {
- if (*p == *r)
- return count;
- }
- ++count;
+ if (strchr(reject, *p))
+ break;
}
- return count;
+ return p - s;
}
EXPORT_SYMBOL(strcspn);
#endif
@@ -596,13 +465,11 @@ EXPORT_SYMBOL(strcspn);
*/
char *strpbrk(const char *cs, const char *ct)
{
- const char *sc1, *sc2;
+ const char *sc;
- for (sc1 = cs; *sc1 != '\0'; ++sc1) {
- for (sc2 = ct; *sc2 != '\0'; ++sc2) {
- if (*sc1 == *sc2)
- return (char *)sc1;
- }
+ for (sc = cs; *sc != '\0'; ++sc) {
+ if (strchr(ct, *sc))
+ return (char *)sc;
}
return NULL;
}
@@ -638,85 +505,6 @@ char *strsep(char **s, const char *ct)
EXPORT_SYMBOL(strsep);
#endif
-/**
- * sysfs_streq - return true if strings are equal, modulo trailing newline
- * @s1: one string
- * @s2: another string
- *
- * This routine returns true iff two strings are equal, treating both
- * NUL and newline-then-NUL as equivalent string terminations. It's
- * geared for use with sysfs input strings, which generally terminate
- * with newlines but are compared against values without newlines.
- */
-bool sysfs_streq(const char *s1, const char *s2)
-{
- while (*s1 && *s1 == *s2) {
- s1++;
- s2++;
- }
-
- if (*s1 == *s2)
- return true;
- if (!*s1 && *s2 == '\n' && !s2[1])
- return true;
- if (*s1 == '\n' && !s1[1] && !*s2)
- return true;
- return false;
-}
-EXPORT_SYMBOL(sysfs_streq);
-
-/**
- * match_string - matches given string in an array
- * @array: array of strings
- * @n: number of strings in the array or -1 for NULL terminated arrays
- * @string: string to match with
- *
- * Return:
- * index of a @string in the @array if matches, or %-EINVAL otherwise.
- */
-int match_string(const char * const *array, size_t n, const char *string)
-{
- int index;
- const char *item;
-
- for (index = 0; index < n; index++) {
- item = array[index];
- if (!item)
- break;
- if (!strcmp(item, string))
- return index;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(match_string);
-
-/**
- * __sysfs_match_string - matches given string in an array
- * @array: array of strings
- * @n: number of strings in the array or -1 for NULL terminated arrays
- * @str: string to match with
- *
- * Returns index of @str in the @array or -EINVAL, just like match_string().
- * Uses sysfs_streq instead of strcmp for matching.
- */
-int __sysfs_match_string(const char * const *array, size_t n, const char *str)
-{
- const char *item;
- int index;
-
- for (index = 0; index < n; index++) {
- item = array[index];
- if (!item)
- break;
- if (sysfs_streq(item, str))
- return index;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(__sysfs_match_string);
-
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
@@ -737,27 +525,6 @@ void *memset(void *s, int c, size_t count)
EXPORT_SYMBOL(memset);
#endif
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- * keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
- memset(s, 0, count);
- barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
#ifndef __HAVE_ARCH_MEMSET16
/**
* memset16() - Fill a memory area with a uint16_t
@@ -891,6 +658,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
const unsigned char *su1, *su2;
int res = 0;
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (count >= sizeof(unsigned long)) {
+ const unsigned long *u1 = cs;
+ const unsigned long *u2 = ct;
+ do {
+ if (get_unaligned(u1) != get_unaligned(u2))
+ break;
+ u1++;
+ u2++;
+ count -= sizeof(unsigned long);
+ } while (count >= sizeof(unsigned long));
+ cs = u1;
+ ct = u2;
+ }
+#endif
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
if ((res = *su1 - *su2) != 0)
break;
@@ -911,7 +693,6 @@ EXPORT_SYMBOL(memcmp);
* while this particular implementation is a simple (tail) call to memcmp, do
* not rely on anything but whether the return value is zero or non-zero.
*/
-#undef bcmp
int bcmp(const void *a, const void *b, size_t len)
{
return memcmp(a, b, len);
@@ -934,7 +715,7 @@ void *memscan(void *addr, int c, size_t size)
unsigned char *p = addr;
while (size) {
- if (*p == c)
+ if (*p == (unsigned char)c)
return (void *)p;
p++;
size--;
@@ -1082,27 +863,3 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes % 8);
}
EXPORT_SYMBOL(memchr_inv);
-
-/**
- * strreplace - Replace all occurrences of character in string.
- * @s: The string to operate on.
- * @old: The character being replaced.
- * @new: The character @old is replaced with.
- *
- * Returns pointer to the nul byte at the end of @s.
- */
-char *strreplace(char *s, char old, char new)
-{
- for (; *s; ++s)
- if (*s == old)
- *s = new;
- return s;
-}
-EXPORT_SYMBOL(strreplace);
-
-void fortify_panic(const char *name)
-{
- pr_emerg("detected buffer overflow in %s\n", name);
- BUG();
-}
-EXPORT_SYMBOL(fortify_panic);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4403e1924f73..7713f73e66b0 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
@@ -30,9 +31,11 @@
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
+ * Return value: number of characters of output that would have been written
+ * (which may be greater than len, if output was truncated).
*/
-void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
- char *buf, int len)
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
@@ -125,11 +128,55 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
else
unit = units_str[units][i];
- snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s %s", (u32)size,
+ tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
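/*
 * A minimal usage sketch of the new return value, with hypothetical numbers:
 * 4096 blocks of 512 bytes is 2097152 bytes, so buf ends up holding roughly
 * "2 MiB" and n reports the length snprintf() would have produced.
 */
	char buf[16];
	int n;

	n = string_get_size(4096, 512, STRING_UNITS_2, buf, sizeof(buf));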
+/**
+ * parse_int_array_user - Split string into a sequence of integers
+ * @from: The user space buffer to read from
+ * @count: The maximum number of bytes to read
+ * @array: Returned pointer to sequence of integers
+ *
+ * On success @array is allocated and initialized with a sequence of
+ * integers extracted from @from, plus an additional leading element that
+ * specifies the number of integers in the sequence.
+ *
+ * Caller takes responsibility for freeing @array when it is no longer
+ * needed.
+ */
+int parse_int_array_user(const char __user *from, size_t count, int **array)
+{
+ int *ints, nints;
+ char *buf;
+ int ret = 0;
+
+ buf = memdup_user_nul(from, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ get_options(buf, 0, &nints);
+ if (!nints) {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+
+ ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
+ if (!ints) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ get_options(buf, nints + 1, ints);
+ *array = ints;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(parse_int_array_user);
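/*
 * A hedged sketch of a caller: the write-handler name and the pr_info()
 * reporting are purely illustrative, but the ints[0]-holds-the-count
 * convention follows the description above.
 */
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	int *ints, i, ret;

	ret = parse_int_array_user(ubuf, count, &ints);
	if (ret)
		return ret;

	for (i = 1; i <= ints[0]; i++)
		pr_info("value %d: %d\n", i - 1, ints[i]);

	kfree(ints);
	return count;
}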
+
static bool unescape_space(char **src, char **dst)
{
char *p = *dst, *q = *src;
@@ -231,35 +278,36 @@ static bool unescape_special(char **src, char **dst)
* @src: source buffer (escaped)
* @dst: destination buffer (unescaped)
* @size: size of the destination buffer (0 to unlimit)
- * @flags: combination of the flags (bitwise OR):
- * %UNESCAPE_SPACE:
+ * @flags: combination of the flags.
+ *
+ * Description:
+ * The function unquotes characters in the given string.
+ *
+ * Because the size of the output will be the same as or less than the size of
+ * the input, the transformation may be performed in place.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will always be NULL-terminated. Source string must be
+ * NULL-terminated as well. The supported flags are::
+ *
+ * UNESCAPE_SPACE:
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
- * %UNESCAPE_OCTAL:
+ * UNESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (1 to 3 digits)
- * %UNESCAPE_HEX:
+ * UNESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
- * %UNESCAPE_SPECIAL:
+ * UNESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
- * %UNESCAPE_ANY:
+ * UNESCAPE_ANY:
* all previous together
*
- * Description:
- * The function unquotes characters in the given string.
- *
- * Because the size of the output will be the same as or less than the size of
- * the input, the transformation may be performed in place.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will always be NULL-terminated. Source string must be
- * NULL-terminated as well.
- *
* Return:
* The amount of the characters processed to the destination buffer excluding
* trailing '\0' is returned.
@@ -360,6 +408,9 @@ static bool escape_special(unsigned char c, char **dst, char *end)
case '\e':
to = 'e';
break;
+ case '"':
+ to = '"';
+ break;
default:
return false;
}
@@ -441,7 +492,31 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* @isz: source buffer size
* @dst: destination buffer (escaped)
* @osz: destination buffer size
- * @flags: combination of the flags (bitwise OR):
+ * @flags: combination of the flags
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
+ *
+ * Description:
+ * The process of escaping byte buffer includes several parts. They are applied
+ * in the following sequence.
+ *
+ * 1. The character is not matched to the one from @only string and thus
+ * must go as-is to the output.
+ * 2. The character is matched to the printable and ASCII classes, if asked,
+ * and in case of match it passes through to the output.
+ * 3. The character is matched to the printable or ASCII class, if asked,
+ * and in case of match it passes through to the output.
+ * 4. The character is checked if it falls into the class given by @flags.
+ * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
+ * character. Note that they actually can't go together, otherwise
+ * %ESCAPE_HEX will be ignored.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * the destination buffer will not be NULL-terminated, thus the caller has to
+ * append it if needed. The supported flags are::
+ *
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
@@ -449,6 +524,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* '\t' - horizontal tab
* '\v' - vertical tab
* %ESCAPE_SPECIAL:
+ * '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
@@ -459,31 +535,27 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
- * escape only non-printable characters (checked by isprint)
+ * escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
- * @only: NULL-terminated string containing characters used to limit
- * the selected escape class. If characters are included in @only
- * that would not normally be escaped by the classes selected
- * in @flags, they will be copied to @dst unescaped.
+ * %ESCAPE_NA:
+ * escape only non-ascii characters, checked by isascii()
+ * %ESCAPE_NAP:
+ * escape only non-printable or non-ascii characters
+ * %ESCAPE_APPEND:
+ * append characters from @only to be escaped by the given classes
*
- * Description:
- * The process of escaping byte buffer includes several parts. They are applied
- * in the following sequence.
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
- * must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
- * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
- * character. Note that they actually can't go together, otherwise
- * %ESCAPE_HEX will be ignored.
+ * %ESCAPE_APPEND helps to pass additional characters to be escaped when one
+ * of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
*
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will not be NULL-terminated, thus caller have to append
- * it if needs.
+ * One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA take higher
+ * priority than the rest of the flags (%ESCAPE_NAP is the highest).
+ * It doesn't make much sense to use either of them without %ESCAPE_OCTAL
+ * or %ESCAPE_HEX, because they cover most of the other character classes.
+ * %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
+ * the above.
*
* Return:
* The total size of the escaped output that would be generated for
@@ -497,41 +569,62 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
+ bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
+ bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
- * - the character is printable, when @flags has
- * %ESCAPE_NP bit set
* - the @only string is supplied and does not contain a
* character under question
+ * - the character is printable and ASCII, when @flags has
+ * %ESCAPE_NAP bit set
+ * - the character is printable, when @flags has
+ * %ESCAPE_NP bit set
+ * - the character is ASCII, when @flags has
+ * %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
+ *
+ * When %ESCAPE_APPEND is passed, the characters from @only
+ * have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
+ * %ESCAPE_NA cases.
*/
- if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(only, c))) {
- /* do nothing */
- } else {
- if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
- continue;
+ if (!(is_append || in_dict) && is_dict &&
+ escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
+ flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_NULL && escape_null(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isprint(c) &&
+ flags & ESCAPE_NP && escape_passthrough(c, &p, end))
+ continue;
- /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
- if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) &&
+ flags & ESCAPE_NA && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
- continue;
- }
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
+
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
+ continue;
escape_passthrough(c, &p, end);
}
@@ -627,3 +720,332 @@ char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
return pathname;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
+
+/*
+ * Returns duplicate string in which the @old characters are replaced by @new.
+ */
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp)
+{
+ char *dst;
+
+ dst = kstrdup(src, gfp);
+ if (!dst)
+ return NULL;
+
+ return strreplace(dst, old, new);
+}
+EXPORT_SYMBOL_GPL(kstrdup_and_replace);
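/*
 * A minimal usage sketch; the source string and replacement characters are
 * hypothetical, chosen to show a '/'-to-'-' sanitization of a name.
 */
	char *name = kstrdup_and_replace("gpio/consumer", '/', '-', GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	/* name is now "gpio-consumer"; free it with kfree() when done. */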
+
+/**
+ * kasprintf_strarray - allocate and fill array of sequential strings
+ * @gfp: flags for the slab allocator
+ * @prefix: prefix to be used
+ * @n: amount of lines to be allocated and filled
+ *
+ * Allocates and fills @n strings using the pattern "%s-%zu", where the prefix
+ * is provided by the caller. The caller is responsible for freeing them with
+ * kfree_strarray() after use.
+ *
+ * Returns array of strings or NULL when memory can't be allocated.
+ */
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
+{
+ char **names;
+ size_t i;
+
+ names = kcalloc(n + 1, sizeof(char *), gfp);
+ if (!names)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
+ if (!names[i]) {
+ kfree_strarray(names, i);
+ return NULL;
+ }
+ }
+
+ return names;
+}
+EXPORT_SYMBOL_GPL(kasprintf_strarray);
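/*
 * A minimal usage sketch pairing the helper with kfree_strarray(); the "clk"
 * prefix and the count of three are hypothetical.
 */
	char **names = kasprintf_strarray(GFP_KERNEL, "clk", 3);

	if (!names)
		return -ENOMEM;
	/* names holds { "clk-0", "clk-1", "clk-2", NULL }; when finished: */
	kfree_strarray(names, 3);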
+
+/**
+ * kfree_strarray - free a number of dynamically allocated strings contained
+ * in an array and the array itself
+ *
+ * @array: Dynamically allocated array of strings to free.
+ * @n: Number of strings (starting from the beginning of the array) to free.
+ *
+ * Passing a non-NULL @array and @n == 0 as well as NULL @array are valid
+ * use-cases. If @array is NULL, the function does nothing.
+ */
+void kfree_strarray(char **array, size_t n)
+{
+ unsigned int i;
+
+ if (!array)
+ return;
+
+ for (i = 0; i < n; i++)
+ kfree(array[i]);
+ kfree(array);
+}
+EXPORT_SYMBOL_GPL(kfree_strarray);
+
+struct strarray {
+ char **array;
+ size_t n;
+};
+
+static void devm_kfree_strarray(struct device *dev, void *res)
+{
+ struct strarray *array = res;
+
+ kfree_strarray(array->array, array->n);
+}
+
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
+{
+ struct strarray *ptr;
+
+ ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
+ if (!ptr->array) {
+ devres_free(ptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ptr->n = n;
+ devres_add(dev, ptr);
+
+ return ptr->array;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
+
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, zeros
+ * the tail of the destination buffer.
+ *
+ * For full explanation of why you may want to consider using the
+ * 'strscpy' functions please see the function docstring for strscpy().
+ *
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
+ */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+{
+ ssize_t written;
+
+ written = strscpy(dest, src, count);
+ if (written < 0 || written == count - 1)
+ return written;
+
+ memset(dest + written + 1, 0, count - written - 1);
+
+ return written;
+}
+EXPORT_SYMBOL(strscpy_pad);
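/*
 * A minimal usage sketch of the zero-padding behaviour documented above,
 * with a hypothetical 8-byte destination.
 */
	char id[8];
	ssize_t n = strscpy_pad(id, "ab", sizeof(id));

	/* n == 2; id[0..1] hold "ab" and id[2..7] are all '\0'. */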
+
+/**
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+EXPORT_SYMBOL(skip_spaces);
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+EXPORT_SYMBOL(strim);
+
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations. It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+ while (*s1 && *s1 == *s2) {
+ s1++;
+ s2++;
+ }
+
+ if (*s1 == *s2)
+ return true;
+ if (!*s1 && *s2 == '\n' && !s2[1])
+ return true;
+ if (*s1 == '\n' && !s1[1] && !*s2)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
+
+/**
+ * match_string - matches given string in an array
+ * @array: array of strings
+ * @n: number of strings in the array or -1 for NULL terminated arrays
+ * @string: string to match with
+ *
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically, the value of -1 for @n was used to search in arrays that
+ * are NULL terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
+ *
+ * Return:
+ * index of a @string in the @array if matches, or %-EINVAL otherwise.
+ */
+int match_string(const char * const *array, size_t n, const char *string)
+{
+ int index;
+ const char *item;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ break;
+ if (!strcmp(item, string))
+ return index;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(match_string);
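/*
 * A minimal usage sketch of the NULL-terminated form (@n == -1) described
 * above; the mode table is hypothetical.
 */
	static const char * const modes[] = { "off", "auto", "max", NULL };
	int idx = match_string(modes, -1, "auto");

	/* idx == 1; a string not present in the table would yield -EINVAL. */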
+
+/**
+ * __sysfs_match_string - matches given string in an array
+ * @array: array of strings
+ * @n: number of strings in the array or -1 for NULL terminated arrays
+ * @str: string to match with
+ *
+ * Returns index of @str in the @array or -EINVAL, just like match_string().
+ * Uses sysfs_streq instead of strcmp for matching.
+ *
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically, the value of -1 for @n was used to search in arrays that
+ * are NULL terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
+ */
+int __sysfs_match_string(const char * const *array, size_t n, const char *str)
+{
+ const char *item;
+ int index;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ break;
+ if (sysfs_streq(item, str))
+ return index;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(__sysfs_match_string);
+
+/**
+ * strreplace - Replace all occurrences of character in string.
+ * @str: The string to operate on.
+ * @old: The character being replaced.
+ * @new: The character @old is replaced with.
+ *
+ * Replaces each @old character with a @new one in the given string @str.
+ *
+ * Return: pointer to the string @str itself.
+ */
+char *strreplace(char *str, char old, char new)
+{
+ char *s = str;
+
+ for (; *s; ++s)
+ if (*s == old)
+ *s = new;
+ return str;
+}
+EXPORT_SYMBOL(strreplace);
+
+/**
+ * memcpy_and_pad - Copy one buffer to another with padding
+ * @dest: Where to copy to
+ * @dest_len: The destination buffer size
+ * @src: Where to copy from
+ * @count: The number of bytes to copy
+ * @pad: Character to use for padding if space is left in destination.
+ */
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad)
+{
+ if (dest_len > count) {
+ memcpy(dest, src, count);
+ memset(dest + count, pad, dest_len - count);
+ } else {
+ memcpy(dest, src, dest_len);
+ }
+}
+EXPORT_SYMBOL(memcpy_and_pad);
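/*
 * A minimal usage sketch: fill a fixed-width, non-%NUL-terminated field, as
 * identify-string style fields are often formatted; the field name and
 * contents are hypothetical.
 */
	char model[16];

	memcpy_and_pad(model, sizeof(model), "disk0", strlen("disk0"), ' ');
	/* model holds "disk0" followed by eleven space characters, no NUL. */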
+
+#ifdef CONFIG_FORTIFY_SOURCE
+/* These are placeholders for fortify compile-time warnings. */
+void __read_overflow2_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__read_overflow2_field);
+void __write_overflow_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__write_overflow_field);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
+#endif /* CONFIG_FORTIFY_SOURCE */
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 023ba9f3b99f..6432b8c3e431 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
@@ -23,34 +25,42 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long res = 0;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
while (max >= sizeof(unsigned long)) {
- unsigned long c, data;
+ unsigned long c, data, mask;
/* Fall back to byte-at-a-time if we get a page fault */
unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
- *(unsigned long *)(dst+res) = c;
+ /*
+ * Note that we mask out the bytes following the NUL. This is
+ * important to do because string oblivious code may read past
+ * the NUL. For those routines, we don't want to give them
+ * potentially random bytes after the NUL in `src`.
+ *
+ * One example of such code is BPF map keys. BPF treats map keys
+ * as an opaque set of bytes. Without the post-NUL mask, any BPF
+ * maps keyed by strings returned from strncpy_from_user() may
+ * have multiple entries for semantically identical strings.
+ */
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
+ mask = zero_bytemask(data);
+ *(unsigned long *)(dst+res) = c & mask;
return res + find_zero(data);
}
+
+ *(unsigned long *)(dst+res) = c;
+
res += sizeof(unsigned long);
max -= sizeof(unsigned long);
}
@@ -104,20 +114,30 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max_addr, src_addr;
+ might_fault();
+ if (should_fail_usercopy())
+ return -EFAULT;
if (unlikely(count <= 0))
return 0;
- max_addr = user_addr_max();
- src_addr = (unsigned long)src;
+ max_addr = TASK_SIZE_MAX;
+ src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
kasan_check_write(dst, count);
check_object_size(dst, count, false);
- if (user_access_begin(src, max)) {
+ if (user_read_access_begin(src, max)) {
retval = do_strncpy_from_user(dst, src, count, max);
- user_access_end();
+ user_read_access_end();
return retval;
}
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 7f2db3fe311f..feeb935a2299 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -2,16 +2,11 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
#include <asm/word-at-a-time.h>
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
-#else
-# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
/*
* Do a strnlen, return length of string *with* final '\0'.
* 'count' is the user-supplied count, while 'max' is the
@@ -25,20 +20,13 @@
* if it fits in an aligned 'long'. The caller needs to check
* the return value against "> max".
*/
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long align, res = 0;
unsigned long c;
/*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- /*
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
@@ -108,15 +96,22 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
- max_addr = user_addr_max();
- src_addr = (unsigned long)str;
+ max_addr = TASK_SIZE_MAX;
+ src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
- if (user_access_begin(str, max)) {
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ if (user_read_access_begin(str, max)) {
retval = do_strnlen_user(str, count, max);
- user_access_end();
+ user_read_access_end();
return retval;
}
}
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
new file mode 100644
index 000000000000..a6b6344354ed
--- /dev/null
+++ b/lib/strscpy_kunit.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Kernel module for testing 'strscpy' family of functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+/*
+ * tc() - Run a specific test case.
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void tc(struct kunit *test, char *src, int count, int expected,
+ int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void strscpy_test(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * tc() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+ * overflow). This means we should only call tc() with
+ * strings up to a maximum of 4 characters long and 'count'
+ * should not exceed 4. To test with longer strings increase
+ * the buffer size in tc().
+ */
+
+ /* tc(test, src, count, expected, chars, terminator, pad) */
+ tc(test, "a", 0, -E2BIG, 0, 0, 0);
+ tc(test, "", 0, -E2BIG, 0, 0, 0);
+
+ tc(test, "a", 1, -E2BIG, 0, 1, 0);
+ tc(test, "", 1, 0, 0, 1, 0);
+
+ tc(test, "ab", 2, -E2BIG, 1, 1, 0);
+ tc(test, "a", 2, 1, 1, 1, 0);
+ tc(test, "", 2, 0, 0, 1, 1);
+
+ tc(test, "abc", 3, -E2BIG, 2, 1, 0);
+ tc(test, "ab", 3, 2, 2, 1, 0);
+ tc(test, "a", 3, 1, 1, 1, 1);
+ tc(test, "", 3, 0, 0, 1, 2);
+
+ tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ tc(test, "abc", 4, 3, 3, 1, 0);
+ tc(test, "ab", 4, 2, 2, 1, 1);
+ tc(test, "a", 4, 1, 1, 1, 2);
+ tc(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static struct kunit_case strscpy_test_cases[] = {
+ KUNIT_CASE(strscpy_test),
+ {}
+};
+
+static struct kunit_suite strscpy_test_suite = {
+ .name = "strscpy",
+ .test_cases = strscpy_test_cases,
+};
+
+kunit_test_suite(strscpy_test_suite);
+
+MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
+MODULE_LICENSE("GPL");
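
A minimal user-space model of the strscpy_pad() behaviour the cases above encode (a sketch assuming only the semantics described in the tc() kerneldoc; strscpy_pad_model() is an illustrative name, the real implementation lives in lib/string.c):

#include <errno.h>
#include <string.h>

static long strscpy_pad_model(char *dst, const char *src, size_t count)
{
	size_t len = strlen(src);

	if (count == 0)
		return -E2BIG;
	if (len >= count) {			/* does not fit: truncate */
		memcpy(dst, src, count - 1);
		dst[count - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* copy including the NUL */
	memset(dst + len + 1, 0, count - len - 1);	/* zero-pad the tail */
	return (long)len;			/* characters copied, NUL excluded */
}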
diff --git a/lib/syscall.c b/lib/syscall.c
index fb328e7ccb08..006e256d2264 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -7,6 +7,7 @@
static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
+ unsigned long args[6] = { };
struct pt_regs *regs;
if (!try_get_task_stack(target)) {
@@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
info->data.nr = syscall_get_nr(target, regs);
if (info->data.nr != -1L)
- syscall_get_arguments(target, regs,
- (unsigned long *)&info->data.args[0]);
+ syscall_get_arguments(target, regs, args);
+
+ info->data.args[0] = args[0];
+ info->data.args[1] = args[1];
+ info->data.args[2] = args[2];
+ info->data.args[3] = args[3];
+ info->data.args[4] = args[4];
+ info->data.args[5] = args[5];
put_task_stack(target);
return 0;
@@ -44,7 +51,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
* .data.instruction_pointer - filled with user PC
*
* If @target is blocked in a system call, returns zero with @info.data.nr
- * set to the the call's number and @info.data.args filled in with its
+ * set to the call's number and @info.data.args filled in with its
* arguments. Registers not used for system call arguments may not be available
* and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
@@ -61,13 +68,13 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
*/
int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
- long state;
unsigned long ncsw;
+ unsigned int state;
if (target == current)
return collect_syscall(target, info);
- state = target->state;
+ state = READ_ONCE(target->__state);
if (unlikely(!state))
return -EAGAIN;
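
The calling contract described in the task_current_syscall() comment above is what /proc/<pid>/syscall builds on; a hedged sketch of a caller (assuming the struct syscall_info layout with an sp member and an embedded struct seccomp_data, as used by fs/proc/base.c):

	struct syscall_info info;

	if (task_current_syscall(task, &info) == 0 && info.data.nr != -1)
		pr_info("pid %d: syscall %d args[0]=%llx sp=%llx ip=%llx\n",
			task->pid, info.data.nr, info.data.args[0],
			info.sp, info.data.instruction_pointer);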
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 25b5cbfb7615..9a68849a5d55 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -19,7 +19,7 @@ static __init bool test_string_check_buf(const char *name, unsigned int flags,
if (q_real == q_test && !memcmp(out_test, out_real, q_test))
return true;
- pr_warn("Test '%s' failed: flags = %u\n", name, flags);
+ pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
in, p, true);
@@ -136,31 +136,31 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPACE | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\\h\\\"\a\e\\",
.s1 = {{
- .out = "\\\\h\\\\\"\\a\\e\\\\",
+ .out = "\\\\h\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\\\\\150\\\\\\042\\a\\e\\\\",
+ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\\\\\x68\\\\\\x22\\a\\e\\\\",
+ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
.out = "\eb \\C\007\"\x90\\r]",
.flags = ESCAPE_SPACE,
},{
- .out = "\\eb \\\\C\\a\"\x90\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\r]",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\eb \\\\C\\a\"\x90\\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\\r]",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
},{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
@@ -169,10 +169,10 @@ static const struct test_string_2 escape0[] __initconst = {{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\015\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\r\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
.out = "\eb \\C\007\"\x90\r]",
@@ -201,12 +201,26 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_NP | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
-#define TEST_STRING_2_DICT_1 "b\\ \t\r"
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
static const struct test_string_2 escape1[] __initconst = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
@@ -216,16 +230,40 @@ static const struct test_string_2 escape1[] __initconst = {{
.out = "\f\\x5c\\x20\n\\x0d\\x09\v",
.flags = ESCAPE_HEX,
},{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
- .in = "\\h\\\"\a\e\\",
+ .in = "\\h\\\"\a\xCF\e\\",
.s1 = {{
- .out = "\\134h\\134\"\a\e\\134",
+ .out = "\\134h\\134\"\a\\317\e\\134",
.flags = ESCAPE_OCTAL,
},{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -233,11 +271,115 @@ static const struct test_string_2 escape1[] __initconst = {{
.flags = ESCAPE_OCTAL,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
+static const struct test_string strings_upper[] __initconst = {
+ {
+ .in = "abcdefgh1234567890test",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+};
+
+static const struct test_string strings_lower[] __initconst = {
+ {
+ .in = "ABCDEFGH1234567890TEST",
+ .out = "abcdefgh1234567890test",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "abcdefgh1234567890test",
+ },
+};
+
static __init const char *test_string_find_match(const struct test_string_2 *s2,
unsigned int flags)
{
@@ -268,7 +410,7 @@ test_string_escape_overflow(const char *in, int p, unsigned int flags, const cha
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %u, osz = 0, expected %d, got %d\n",
+ pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
name, flags, q_test, q_real);
}
@@ -293,8 +435,13 @@ static __init void test_string_escape(const char *name,
/* NULL injection */
if (flags & ESCAPE_NULL) {
in[p++] = '\0';
- out_test[q_test++] = '\\';
- out_test[q_test++] = '0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
}
/* Don't try strings that have no output */
@@ -390,27 +537,72 @@ static __init void test_string_get_size(void)
test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
+static void __init test_string_upper_lower(void)
+{
+ char *dst;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(strings_upper); i++) {
+ const char *s = strings_upper[i].in;
+ int len = strlen(strings_upper[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_upper(dst, s);
+ if (memcmp(dst, strings_upper[i].out, len)) {
+ pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
+ strings_upper[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(strings_lower); i++) {
+ const char *s = strings_lower[i].in;
+ int len = strlen(strings_lower[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_lower(dst, s);
+ if (memcmp(dst, strings_lower[i].out, len)) {
+ pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
+ strings_lower[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+}
+
static int __init test_string_helpers_init(void)
{
unsigned int i;
pr_info("Running tests...\n");
- for (i = 0; i < UNESCAPE_ANY + 1; i++)
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
- get_random_int() % (UNESCAPE_ANY + 1), true);
+ get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
/* Without dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
/* Test string_get_size() */
test_string_get_size();
+ /* Test string_upper(), string_lower() */
+ test_string_upper_lower();
+
return -EINVAL;
}
module_init(test_string_helpers_init);
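
A hedged sketch of how a caller uses the ESCAPE_NA behaviour the new vectors above pin down (string_escape_mem() as declared in include/linux/string_helpers.h; the expected output follows from the ESCAPE_HEX | ESCAPE_NA rows above):

	char out[64];
	int n;

	/* Only non-ASCII bytes are escaped; ASCII bytes, printable or not, pass through. */
	n = string_escape_mem("a\007\xCF\r", 4, out, sizeof(out),
			      ESCAPE_HEX | ESCAPE_NA, NULL);
	/* expected: out holds 'a', '\007', '\\', 'x', 'c', 'f', '\r' and n == 7 */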
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 51a98f7ee79e..65f22c2578b0 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Test cases for printf facility.
+ * Test cases for bitmap API.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -16,11 +16,48 @@
#include "../tools/testing/selftests/kselftest_module.h"
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+#define EXP1_IN_BITS (sizeof(exp1) * 8)
+
+KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;
+static char print_buf[PAGE_SIZE * 2] __initdata;
+
+static const unsigned long exp1[] __initconst = {
+ BITMAP_FROM_U64(1),
+ BITMAP_FROM_U64(2),
+ BITMAP_FROM_U64(0x0000ffff),
+ BITMAP_FROM_U64(0xffff0000),
+ BITMAP_FROM_U64(0x55555555),
+ BITMAP_FROM_U64(0xaaaaaaaa),
+ BITMAP_FROM_U64(0x11111111),
+ BITMAP_FROM_U64(0x22222222),
+ BITMAP_FROM_U64(0xffffffff),
+ BITMAP_FROM_U64(0xfffffffe),
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+ BITMAP_FROM_U64(0),
+ BITMAP_FROM_U64(0x00008000),
+ BITMAP_FROM_U64(0x80000000),
+};
+
+static const unsigned long exp2[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+};
+/* Fibonacci sequence */
+static const unsigned long exp2_to_exp3_mask[] __initconst = {
+ BITMAP_FROM_U64(0x008000020020212eULL),
+};
+/* exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask) */
+static const unsigned long exp3_0_1[] __initconst = {
+ BITMAP_FROM_U64(0x33b3333311313137ULL),
+};
+/* exp3_1_0 = (exp2[1] & ~exp2_to_exp3_mask) | (exp2[0] & exp2_to_exp3_mask) */
+static const unsigned long exp3_1_0[] __initconst = {
+ BITMAP_FROM_U64(0xff7fffff77575751ULL),
+};
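
/*
 * Illustrative only (bitmap_replace_model is not a kernel symbol): a naive
 * per-word model of what bitmap_replace(dst, old, new, mask, nbits) is
 * expected to compute for these constants, i.e.
 * exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask).
 */
static void bitmap_replace_model(unsigned long *dst, const unsigned long *old,
				 const unsigned long *new, const unsigned long *mask,
				 unsigned int nbits)
{
	unsigned int k;

	for (k = 0; k < BITS_TO_LONGS(nbits); k++)
		dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
}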
static bool __init
__check_eq_uint(const char *srcfile, unsigned int line,
@@ -92,6 +129,50 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
return true;
}
+static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
+ const unsigned int offset,
+ const unsigned int size,
+ const unsigned char *const clump_exp,
+ const unsigned long *const clump)
+{
+ unsigned long exp;
+
+ if (offset >= size) {
+ pr_warn("[%s:%u] bit offset for clump out-of-bounds: expected less than %u, got %u\n",
+ srcfile, line, size, offset);
+ return false;
+ }
+
+ exp = clump_exp[offset / 8];
+ if (!exp) {
+ pr_warn("[%s:%u] bit offset for zero clump: expected nonzero clump, got bit offset %u with clump value 0",
+ srcfile, line, offset);
+ return false;
+ }
+
+ if (*clump != exp) {
+ pr_warn("[%s:%u] expected clump value of 0x%lX, got clump value of 0x%lX",
+ srcfile, line, exp, *clump);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __init
+__check_eq_str(const char *srcfile, unsigned int line,
+ const char *exp_str, const char *str,
+ unsigned int len)
+{
+ bool eq;
+
+ eq = strncmp(exp_str, str, len) == 0;
+ if (!eq)
+ pr_err("[%s:%u] expected %s, got %s\n", srcfile, line, exp_str, str);
+
+ return eq;
+}
+
#define __expect_eq(suffix, ...) \
({ \
int result = 0; \
@@ -108,6 +189,8 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
+#define expect_eq_clump8(...) __expect_eq(clump8, ##__VA_ARGS__)
+#define expect_eq_str(...) __expect_eq(str, ##__VA_ARGS__)
static void __init test_zero_clear(void)
{
@@ -138,6 +221,47 @@ static void __init test_zero_clear(void)
expect_eq_pbl("", bmap, 1024);
}
+static void __init test_find_nth_bit(void)
+{
+ unsigned long b, bit, cnt = 0;
+ DECLARE_BITMAP(bmap, 64 * 3);
+
+ bitmap_zero(bmap, 64 * 3);
+ __set_bit(10, bmap);
+ __set_bit(20, bmap);
+ __set_bit(30, bmap);
+ __set_bit(40, bmap);
+ __set_bit(50, bmap);
+ __set_bit(60, bmap);
+ __set_bit(80, bmap);
+ __set_bit(123, bmap);
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
+ expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3 - 1, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3 - 1, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3 - 1, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
+ expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+
+ for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
+ b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
+ expect_eq_uint(b, bit);
+ }
+}
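
/*
 * Illustrative only (find_nth_bit_model is not a kernel symbol): a naive
 * model of the contract test_find_nth_bit() relies on: find_nth_bit()
 * returns the index of the n-th (0-based) set bit, or @size when fewer
 * than n + 1 bits are set.
 */
static unsigned long find_nth_bit_model(const unsigned long *addr,
					unsigned long size, unsigned long n)
{
	unsigned long bit;

	for_each_set_bit(bit, addr, size)
		if (n-- == 0)
			return bit;

	return size;
}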
+
static void __init test_fill_set(void)
{
DECLARE_BITMAP(bmap, 1024);
@@ -206,7 +330,58 @@ static void __init test_copy(void)
expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
-#define PARSE_TIME 0x1
+static void __init test_bitmap_region(void)
+{
+ int pos, order;
+
+ DECLARE_BITMAP(bmap, 1000);
+
+ bitmap_zero(bmap, 1000);
+
+ for (order = 0; order < 10; order++) {
+ pos = bitmap_find_free_region(bmap, 1000, order);
+ if (order == 0)
+ expect_eq_uint(pos, 0);
+ else
+ expect_eq_uint(pos, order < 9 ? BIT(order) : -ENOMEM);
+ }
+
+ bitmap_release_region(bmap, 0, 0);
+ for (order = 1; order < 9; order++)
+ bitmap_release_region(bmap, BIT(order), order);
+
+ expect_eq_uint(bitmap_weight(bmap, 1000), 0);
+}
+
+#define EXP2_IN_BITS (sizeof(exp2) * 8)
+
+static void __init test_replace(void)
+{
+ unsigned int nbits = 64;
+ unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
+ DECLARE_BITMAP(bmap, 1024);
+
+ BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+}
+
+#define PARSE_TIME 0x1
+#define NO_LEN 0x2
struct test_bitmap_parselist{
const int errno;
@@ -216,63 +391,81 @@ struct test_bitmap_parselist{
const int flags;
};
-static const unsigned long exp[] __initconst = {
- BITMAP_FROM_U64(1),
- BITMAP_FROM_U64(2),
- BITMAP_FROM_U64(0x0000ffff),
- BITMAP_FROM_U64(0xffff0000),
- BITMAP_FROM_U64(0x55555555),
- BITMAP_FROM_U64(0xaaaaaaaa),
- BITMAP_FROM_U64(0x11111111),
- BITMAP_FROM_U64(0x22222222),
- BITMAP_FROM_U64(0xffffffff),
- BITMAP_FROM_U64(0xfffffffe),
- BITMAP_FROM_U64(0x3333333311111111ULL),
- BITMAP_FROM_U64(0xffffffff77777777ULL),
- BITMAP_FROM_U64(0),
-};
-
-static const unsigned long exp2[] __initconst = {
- BITMAP_FROM_U64(0x3333333311111111ULL),
- BITMAP_FROM_U64(0xffffffff77777777ULL)
-};
-
static const struct test_bitmap_parselist parselist_tests[] __initconst = {
#define step (sizeof(u64) / sizeof(unsigned long))
- {0, "0", &exp[0], 8, 0},
- {0, "1", &exp[1 * step], 8, 0},
- {0, "0-15", &exp[2 * step], 32, 0},
- {0, "16-31", &exp[3 * step], 32, 0},
- {0, "0-31:1/2", &exp[4 * step], 32, 0},
- {0, "1-31:1/2", &exp[5 * step], 32, 0},
- {0, "0-31:1/4", &exp[6 * step], 32, 0},
- {0, "1-31:1/4", &exp[7 * step], 32, 0},
- {0, "0-31:4/4", &exp[8 * step], 32, 0},
- {0, "1-31:4/4", &exp[9 * step], 32, 0},
- {0, "0-31:1/4,32-63:2/4", &exp[10 * step], 64, 0},
- {0, "0-31:3/4,32-63:4/4", &exp[11 * step], 64, 0},
- {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp[11 * step], 64, 0},
+ {0, "0", &exp1[0], 8, 0},
+ {0, "1", &exp1[1 * step], 8, 0},
+ {0, "0-15", &exp1[2 * step], 32, 0},
+ {0, "16-31", &exp1[3 * step], 32, 0},
+ {0, "0-31:1/2", &exp1[4 * step], 32, 0},
+ {0, "1-31:1/2", &exp1[5 * step], 32, 0},
+ {0, "0-31:1/4", &exp1[6 * step], 32, 0},
+ {0, "1-31:1/4", &exp1[7 * step], 32, 0},
+ {0, "0-31:4/4", &exp1[8 * step], 32, 0},
+ {0, "1-31:4/4", &exp1[9 * step], 32, 0},
+ {0, "0-31:1/4,32-63:2/4", &exp1[10 * step], 64, 0},
+ {0, "0-31:3/4,32-63:4/4", &exp1[11 * step], 64, 0},
+ {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp1[11 * step], 64, 0},
{0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0},
{0, "0-2047:128/256", NULL, 2048, PARSE_TIME},
- {0, "", &exp[12 * step], 8, 0},
- {0, "\n", &exp[12 * step], 8, 0},
- {0, ",, ,, , , ,", &exp[12 * step], 8, 0},
- {0, " , ,, , , ", &exp[12 * step], 8, 0},
- {0, " , ,, , , \n", &exp[12 * step], 8, 0},
+ {0, "", &exp1[12 * step], 8, 0},
+ {0, "\n", &exp1[12 * step], 8, 0},
+ {0, ",, ,, , , ,", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , ", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , \n", &exp1[12 * step], 8, 0},
+
+ {0, "0-0", &exp1[0], 32, 0},
+ {0, "1-1", &exp1[1 * step], 32, 0},
+ {0, "15-15", &exp1[13 * step], 32, 0},
+ {0, "31-31", &exp1[14 * step], 32, 0},
+
+ {0, "0-0:0/1", &exp1[12 * step], 32, 0},
+ {0, "0-0:1/1", &exp1[0], 32, 0},
+ {0, "0-0:1/31", &exp1[0], 32, 0},
+ {0, "0-0:31/31", &exp1[0], 32, 0},
+ {0, "1-1:1/1", &exp1[1 * step], 32, 0},
+ {0, "0-15:16/31", &exp1[2 * step], 32, 0},
+ {0, "15-15:1/2", &exp1[13 * step], 32, 0},
+ {0, "15-15:31/31", &exp1[13 * step], 32, 0},
+ {0, "15-31:1/31", &exp1[13 * step], 32, 0},
+ {0, "16-31:16/31", &exp1[3 * step], 32, 0},
+ {0, "31-31:31/31", &exp1[14 * step], 32, 0},
+
+ {0, "N-N", &exp1[14 * step], 32, 0},
+ {0, "0-0:1/N", &exp1[0], 32, 0},
+ {0, "0-0:N/N", &exp1[0], 32, 0},
+ {0, "0-15:16/N", &exp1[2 * step], 32, 0},
+ {0, "15-15:N/N", &exp1[13 * step], 32, 0},
+ {0, "15-N:1/N", &exp1[13 * step], 32, 0},
+ {0, "16-N:16/N", &exp1[3 * step], 32, 0},
+ {0, "N-N:N/N", &exp1[14 * step], 32, 0},
+
+ {0, "0-N:1/3,1-N:1/3,2-N:1/3", &exp1[8 * step], 32, 0},
+ {0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
+ {0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+
+ {0, "all", &exp1[8 * step], 32, 0},
+ {0, "0, 1, all, ", &exp1[8 * step], 32, 0},
+ {0, "all:1/2", &exp1[4 * step], 32, 0},
+ {0, "ALL:1/2", &exp1[4 * step], 32, 0},
+ {-EINVAL, "al", NULL, 8, 0},
+ {-EINVAL, "alll", NULL, 8, 0},
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
- {-EINVAL, "0-31:", NULL, 8, 0},
- {-EINVAL, "0-31:0", NULL, 8, 0},
- {-EINVAL, "0-31:0/", NULL, 8, 0},
- {-EINVAL, "0-31:0/0", NULL, 8, 0},
- {-EINVAL, "0-31:1/0", NULL, 8, 0},
- {-EINVAL, "0-31:10/1", NULL, 8, 0},
+ {-ERANGE, "8-8", NULL, 8, 0},
+ {-ERANGE, "0-31", NULL, 8, 0},
+ {-EINVAL, "0-31:", NULL, 32, 0},
+ {-EINVAL, "0-31:0", NULL, 32, 0},
+ {-EINVAL, "0-31:0/", NULL, 32, 0},
+ {-EINVAL, "0-31:0/0", NULL, 32, 0},
+ {-EINVAL, "0-31:1/0", NULL, 32, 0},
+ {-EINVAL, "0-31:10/1", NULL, 32, 0},
{-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0},
{-EINVAL, "a-31", NULL, 8, 0},
@@ -280,93 +473,224 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{-EINVAL, "a-31:10/1", NULL, 8, 0},
{-EINVAL, "0-31:a/1", NULL, 8, 0},
{-EINVAL, "0-\n", NULL, 8, 0},
+
};
-static void __init __test_bitmap_parselist(int is_user)
+static void __init test_bitmap_parselist(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
#define ptest parselist_tests[i]
- if (is_user) {
- mm_segment_t orig_fs = get_fs();
- size_t len = strlen(ptest.in);
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parselist_user(ptest.in, len,
- bmap, ptest.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- time = ktime_get();
- err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+ time = ktime_get() - time;
if (err != ptest.errno) {
- pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, ptest.in, err, ptest.errno);
+ pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
+ i, ptest.in, err, ptest.errno);
+ failed_tests++;
continue;
}
if (!err && ptest.expected
&& !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
- pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, ptest.in, bmap[0],
+ pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, ptest.in, bmap[0],
*ptest.expected);
+ failed_tests++;
continue;
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, ptest.in, time);
+ pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ i, ptest.in, time);
+
+#undef ptest
}
}
-static void __init test_bitmap_parselist(void)
+static void __init test_bitmap_printlist(void)
{
- __test_bitmap_parselist(0);
+ unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char expected[256];
+ int ret, slen;
+ ktime_t time;
+
+ if (!buf || !bmap)
+ goto out;
+
+ memset(bmap, -1, PAGE_SIZE);
+ slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
+ if (slen < 0)
+ goto out;
+
+ time = ktime_get();
+ ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
+ time = ktime_get() - time;
+
+ if (ret != slen + 1) {
+ pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+ failed_tests++;
+ goto out;
+ }
+
+ if (strncmp(buf, expected, slen)) {
+ pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+ failed_tests++;
+ goto out;
+ }
+
+ pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+out:
+ kfree(buf);
+ kfree(bmap);
}
-static void __init test_bitmap_parselist_user(void)
+static const unsigned long parse_test[] __initconst = {
+ BITMAP_FROM_U64(0),
+ BITMAP_FROM_U64(1),
+ BITMAP_FROM_U64(0xdeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL),
+};
+
+static const unsigned long parse_test2[] __initconst = {
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xdeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xbaadf00ddeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0x0badf00ddeadbeef),
+};
+
+static const struct test_bitmap_parselist parse_tests[] __initconst = {
+ {0, "", &parse_test[0 * step], 32, 0},
+ {0, " ", &parse_test[0 * step], 32, 0},
+ {0, "0", &parse_test[0 * step], 32, 0},
+ {0, "0\n", &parse_test[0 * step], 32, 0},
+ {0, "1", &parse_test[1 * step], 32, 0},
+ {0, "deadbeef", &parse_test[2 * step], 32, 0},
+ {0, "1,0", &parse_test[3 * step], 33, 0},
+ {0, "deadbeef,\n,0,1", &parse_test[2 * step], 96, 0},
+
+ {0, "deadbeef,1,0", &parse_test2[0 * 2 * step], 96, 0},
+ {0, "baadf00d,deadbeef,1,0", &parse_test2[1 * 2 * step], 128, 0},
+ {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, 0},
+ {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, NO_LEN},
+ {0, " badf00d,deadbeef,1,0 ", &parse_test2[2 * 2 * step], 124, 0},
+ {0, " , badf00d,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0},
+ {0, " , badf00d, ,, ,,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0},
+
+ {-EINVAL, "goodfood,deadbeef,1,0", NULL, 128, 0},
+ {-EOVERFLOW, "3,0", NULL, 33, 0},
+ {-EOVERFLOW, "123badf00d,deadbeef,1,0", NULL, 128, 0},
+ {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 90, 0},
+ {-EOVERFLOW, "fbadf00d,deadbeef,1,0", NULL, 95, 0},
+ {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 100, 0},
+#undef step
+};
+
+static void __init test_bitmap_parse(void)
{
- __test_bitmap_parselist(1);
-}
+ int i;
+ int err;
+ ktime_t time;
+ DECLARE_BITMAP(bmap, 2048);
+
+ for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
+ struct test_bitmap_parselist test = parse_tests[i];
+ size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in);
-#define EXP_BYTES (sizeof(exp) * 8)
+ time = ktime_get();
+ err = bitmap_parse(test.in, len, bmap, test.nbits);
+ time = ktime_get() - time;
+
+ if (err != test.errno) {
+ pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
+ i, test.in, err, test.errno);
+ failed_tests++;
+ continue;
+ }
+
+ if (!err && test.expected
+ && !__bitmap_equal(bmap, test.expected, test.nbits)) {
+ pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, test.in, bmap[0],
+ *test.expected);
+ failed_tests++;
+ continue;
+ }
+
+ if (test.flags & PARSE_TIME)
+ pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ i, test.in, time);
+ }
+}
static void __init test_bitmap_arr32(void)
{
unsigned int nbits, next_bit;
- u32 arr[sizeof(exp) / 4];
- DECLARE_BITMAP(bmap2, EXP_BYTES);
+ u32 arr[EXP1_IN_BITS / 32];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
memset(arr, 0xa5, sizeof(arr));
- for (nbits = 0; nbits < EXP_BYTES; ++nbits) {
- bitmap_to_arr32(arr, exp, nbits);
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ bitmap_to_arr32(arr, exp1, nbits);
bitmap_from_arr32(bmap2, arr, nbits);
- expect_eq_bitmap(bmap2, exp, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
next_bit = find_next_bit(bmap2,
round_up(nbits, BITS_PER_LONG), nbits);
- if (next_bit < round_up(nbits, BITS_PER_LONG))
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
pr_err("bitmap_copy_arr32(nbits == %d:"
" tail is not safely cleared: %d\n",
nbits, next_bit);
+ failed_tests++;
+ }
- if (nbits < EXP_BYTES - 32)
+ if (nbits < EXP1_IN_BITS - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
0xa5a5a5a5);
}
}
+static void __init test_bitmap_arr64(void)
+{
+ unsigned int nbits, next_bit;
+ u64 arr[EXP1_IN_BITS / 64];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
+
+ memset(arr, 0xa5, sizeof(arr));
+
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ memset(bmap2, 0xff, sizeof(arr));
+ bitmap_to_arr64(arr, exp1, nbits);
+ bitmap_from_arr64(bmap2, arr, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
+
+ next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
+ pr_err("bitmap_copy_arr64(nbits == %d:"
+ " tail is not safely cleared: %d\n", nbits, next_bit);
+ failed_tests++;
+ }
+
+ if ((nbits % 64) &&
+ (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0))) {
+ pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
+ nbits, arr[(nbits - 1) / 64],
+ GENMASK_ULL((nbits - 1) % 64, 0));
+ failed_tests++;
+ }
+
+ if (nbits < EXP1_IN_BITS - 64)
+ expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
+ }
+}
+
static void noinline __init test_mem_optimisations(void)
{
DECLARE_BITMAP(bmap1, 1024);
@@ -404,15 +728,551 @@ static void noinline __init test_mem_optimisations(void)
}
}
+static const unsigned char clump_exp[] __initconst = {
+ 0x01, /* 1 bit set */
+ 0x02, /* non-edge 1 bit set */
+ 0x00, /* zero bits set */
+ 0x38, /* 3 bits set across 4-bit boundary */
+ 0x38, /* Repeated clump */
+ 0x0F, /* 4 bits set */
+ 0xFF, /* all bits set */
+ 0x05, /* non-adjacent 2 bits set */
+};
+
+static void __init test_for_each_set_clump8(void)
+{
+#define CLUMP_EXP_NUMBITS 64
+ DECLARE_BITMAP(bits, CLUMP_EXP_NUMBITS);
+ unsigned int start;
+ unsigned long clump;
+
+ /* set bitmap to test case */
+ bitmap_zero(bits, CLUMP_EXP_NUMBITS);
+ bitmap_set(bits, 0, 1); /* 0x01 */
+ bitmap_set(bits, 9, 1); /* 0x02 */
+ bitmap_set(bits, 27, 3); /* 0x38 */
+ bitmap_set(bits, 35, 3); /* 0x38 */
+ bitmap_set(bits, 40, 4); /* 0x0F */
+ bitmap_set(bits, 48, 8); /* 0xFF */
+ bitmap_set(bits, 56, 1); /* 0x05 - part 1 */
+ bitmap_set(bits, 58, 1); /* 0x05 - part 2 */
+
+ for_each_set_clump8(start, clump, bits, CLUMP_EXP_NUMBITS)
+ expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
+}
+
+static void __init test_for_each_set_bit_wrap(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ bitmap_zero(copy, 500);
+
+ for_each_set_bit_wrap(bit, orig, 500, wr)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bit(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ bit = wr;
+
+ for_each_set_bit_from(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bit(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_fill(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ bit = wr;
+
+ for_each_clear_bit_from(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bitrange(s, e, orig, 500)
+ bitmap_set(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bitrange(s, e, orig, 500)
+ bitmap_clear(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ s = wr;
+
+ for_each_set_bitrange_from(s, e, orig, 500)
+ bitmap_set(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_fill(orig, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ s = wr;
+
+ for_each_clear_bitrange_from(s, e, orig, 500)
+ bitmap_clear(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+struct test_bitmap_cut {
+ unsigned int first;
+ unsigned int cut;
+ unsigned int nbits;
+ unsigned long in[4];
+ unsigned long expected[4];
+};
+
+static struct test_bitmap_cut test_cut[] = {
+ { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
+ { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
+ { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, },
+ { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, },
+ { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
+ { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
+ { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
+ { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
+ { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
+ { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
+
+ { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ },
+ { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0x00000001UL, 0x00000001UL, },
+ },
+
+ { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
+ { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
+ { 0x00000001UL, },
+ },
+ { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
+ { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
+ { 0x2d2dffffUL, },
+ },
+};
+
+static void __init test_bitmap_cut(void)
+{
+ unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
+ struct test_bitmap_cut *t = &test_cut[i];
+
+ memcpy(in, t->in, sizeof(t->in));
+
+ bitmap_cut(out, in, t->first, t->cut, t->nbits);
+
+ expect_eq_bitmap(t->expected, out, t->nbits);
+ }
+}
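
/*
 * Illustrative only (bitmap_cut_model is not a kernel symbol): a naive,
 * non-overlapping model of the bitmap_cut() contract the vectors above
 * encode: drop @cut bits of @src starting at bit @first and shift the
 * higher bits down to close the gap. E.g. { .first = 3, .cut = 3 } turns
 * 0xaa (0b10101010) into 0x12 (0b00010010). The real helper also handles
 * partially overlapping @dst/@src buffers.
 */
static void bitmap_cut_model(unsigned long *dst, const unsigned long *src,
			     unsigned int first, unsigned int cut,
			     unsigned int nbits)
{
	unsigned int i, j = 0;

	bitmap_zero(dst, nbits);
	for (i = 0; i < nbits; i++) {
		if (i >= first && i < first + cut)
			continue;	/* skip the cut region */
		if (test_bit(i, src))
			__set_bit(j, dst);
		j++;
	}
}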
+
+struct test_bitmap_print {
+ const unsigned long *bitmap;
+ unsigned long nbits;
+ const char *mask;
+ const char *list;
+};
+
+static const unsigned long small_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char small_mask[] __initconst = "33333333,11111111\n";
+static const char small_list[] __initconst = "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61\n";
+
+static const unsigned long large_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char large_mask[] __initconst = "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111\n";
+
+static const char large_list[] __initconst = /* more than 4KB */
+ "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61,64,68,72,76,80,84,88,92,96-97,100-101,104-1"
+ "05,108-109,112-113,116-117,120-121,124-125,128,132,136,140,144,148,152,156,160-161,164-165,168-169,172-173,176-1"
+ "77,180-181,184-185,188-189,192,196,200,204,208,212,216,220,224-225,228-229,232-233,236-237,240-241,244-245,248-2"
+ "49,252-253,256,260,264,268,272,276,280,284,288-289,292-293,296-297,300-301,304-305,308-309,312-313,316-317,320,3"
+ "24,328,332,336,340,344,348,352-353,356-357,360-361,364-365,368-369,372-373,376-377,380-381,384,388,392,396,400,4"
+ "04,408,412,416-417,420-421,424-425,428-429,432-433,436-437,440-441,444-445,448,452,456,460,464,468,472,476,480-4"
+ "81,484-485,488-489,492-493,496-497,500-501,504-505,508-509,512,516,520,524,528,532,536,540,544-545,548-549,552-5"
+ "53,556-557,560-561,564-565,568-569,572-573,576,580,584,588,592,596,600,604,608-609,612-613,616-617,620-621,624-6"
+ "25,628-629,632-633,636-637,640,644,648,652,656,660,664,668,672-673,676-677,680-681,684-685,688-689,692-693,696-6"
+ "97,700-701,704,708,712,716,720,724,728,732,736-737,740-741,744-745,748-749,752-753,756-757,760-761,764-765,768,7"
+ "72,776,780,784,788,792,796,800-801,804-805,808-809,812-813,816-817,820-821,824-825,828-829,832,836,840,844,848,8"
+ "52,856,860,864-865,868-869,872-873,876-877,880-881,884-885,888-889,892-893,896,900,904,908,912,916,920,924,928-9"
+ "29,932-933,936-937,940-941,944-945,948-949,952-953,956-957,960,964,968,972,976,980,984,988,992-993,996-997,1000-"
+ "1001,1004-1005,1008-1009,1012-1013,1016-1017,1020-1021,1024,1028,1032,1036,1040,1044,1048,1052,1056-1057,1060-10"
+ "61,1064-1065,1068-1069,1072-1073,1076-1077,1080-1081,1084-1085,1088,1092,1096,1100,1104,1108,1112,1116,1120-1121"
+ ",1124-1125,1128-1129,1132-1133,1136-1137,1140-1141,1144-1145,1148-1149,1152,1156,1160,1164,1168,1172,1176,1180,1"
+ "184-1185,1188-1189,1192-1193,1196-1197,1200-1201,1204-1205,1208-1209,1212-1213,1216,1220,1224,1228,1232,1236,124"
+ "0,1244,1248-1249,1252-1253,1256-1257,1260-1261,1264-1265,1268-1269,1272-1273,1276-1277,1280,1284,1288,1292,1296,"
+ "1300,1304,1308,1312-1313,1316-1317,1320-1321,1324-1325,1328-1329,1332-1333,1336-1337,1340-1341,1344,1348,1352,13"
+ "56,1360,1364,1368,1372,1376-1377,1380-1381,1384-1385,1388-1389,1392-1393,1396-1397,1400-1401,1404-1405,1408,1412"
+ ",1416,1420,1424,1428,1432,1436,1440-1441,1444-1445,1448-1449,1452-1453,1456-1457,1460-1461,1464-1465,1468-1469,1"
+ "472,1476,1480,1484,1488,1492,1496,1500,1504-1505,1508-1509,1512-1513,1516-1517,1520-1521,1524-1525,1528-1529,153"
+ "2-1533,1536,1540,1544,1548,1552,1556,1560,1564,1568-1569,1572-1573,1576-1577,1580-1581,1584-1585,1588-1589,1592-"
+ "1593,1596-1597,1600,1604,1608,1612,1616,1620,1624,1628,1632-1633,1636-1637,1640-1641,1644-1645,1648-1649,1652-16"
+ "53,1656-1657,1660-1661,1664,1668,1672,1676,1680,1684,1688,1692,1696-1697,1700-1701,1704-1705,1708-1709,1712-1713"
+ ",1716-1717,1720-1721,1724-1725,1728,1732,1736,1740,1744,1748,1752,1756,1760-1761,1764-1765,1768-1769,1772-1773,1"
+ "776-1777,1780-1781,1784-1785,1788-1789,1792,1796,1800,1804,1808,1812,1816,1820,1824-1825,1828-1829,1832-1833,183"
+ "6-1837,1840-1841,1844-1845,1848-1849,1852-1853,1856,1860,1864,1868,1872,1876,1880,1884,1888-1889,1892-1893,1896-"
+ "1897,1900-1901,1904-1905,1908-1909,1912-1913,1916-1917,1920,1924,1928,1932,1936,1940,1944,1948,1952-1953,1956-19"
+ "57,1960-1961,1964-1965,1968-1969,1972-1973,1976-1977,1980-1981,1984,1988,1992,1996,2000,2004,2008,2012,2016-2017"
+ ",2020-2021,2024-2025,2028-2029,2032-2033,2036-2037,2040-2041,2044-2045,2048,2052,2056,2060,2064,2068,2072,2076,2"
+ "080-2081,2084-2085,2088-2089,2092-2093,2096-2097,2100-2101,2104-2105,2108-2109,2112,2116,2120,2124,2128,2132,213"
+ "6,2140,2144-2145,2148-2149,2152-2153,2156-2157,2160-2161,2164-2165,2168-2169,2172-2173,2176,2180,2184,2188,2192,"
+ "2196,2200,2204,2208-2209,2212-2213,2216-2217,2220-2221,2224-2225,2228-2229,2232-2233,2236-2237,2240,2244,2248,22"
+ "52,2256,2260,2264,2268,2272-2273,2276-2277,2280-2281,2284-2285,2288-2289,2292-2293,2296-2297,2300-2301,2304,2308"
+ ",2312,2316,2320,2324,2328,2332,2336-2337,2340-2341,2344-2345,2348-2349,2352-2353,2356-2357,2360-2361,2364-2365,2"
+ "368,2372,2376,2380,2384,2388,2392,2396,2400-2401,2404-2405,2408-2409,2412-2413,2416-2417,2420-2421,2424-2425,242"
+ "8-2429,2432,2436,2440,2444,2448,2452,2456,2460,2464-2465,2468-2469,2472-2473,2476-2477,2480-2481,2484-2485,2488-"
+ "2489,2492-2493,2496,2500,2504,2508,2512,2516,2520,2524,2528-2529,2532-2533,2536-2537,2540-2541,2544-2545,2548-25"
+ "49,2552-2553,2556-2557\n";
+
+static const struct test_bitmap_print test_print[] __initconst = {
+ { small_bitmap, sizeof(small_bitmap) * BITS_PER_BYTE, small_mask, small_list },
+ { large_bitmap, sizeof(large_bitmap) * BITS_PER_BYTE, large_mask, large_list },
+};
+
+static void __init test_bitmap_print_buf(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_print); i++) {
+ const struct test_bitmap_print *t = &test_print[i];
+ int n;
+
+ n = bitmap_print_bitmask_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->mask) + 1, n);
+ expect_eq_str(t->mask, print_buf, n);
+
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1, n);
+ expect_eq_str(t->list, print_buf, n);
+
+ /* test by non-zero offset */
+ if (strlen(t->list) > PAGE_SIZE) {
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ PAGE_SIZE, PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1 - PAGE_SIZE, n);
+ expect_eq_str(t->list + PAGE_SIZE, print_buf, n);
+ }
+ }
+}
+
+/*
+ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
+ * To work around it, GCOV is force-disabled in the Makefile for this configuration.
+ */
+static void __init test_bitmap_const_eval(void)
+{
+ DECLARE_BITMAP(bitmap, BITS_PER_LONG);
+ unsigned long initvar = BIT(2);
+ unsigned long bitopvar = 0;
+ unsigned long var = 0;
+ int res;
+
+ /*
+ * Compilers must be able to optimize all of those to compile-time
+ * constants on any supported optimization level (-O2, -Os) and any
+ * architecture. Otherwise, trigger a build bug.
+ * The whole function then gets optimized out, so there is nothing to
+ * do at runtime.
+ */
+
+ /*
+ * Equivalent to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
+ * Clang on s390 optimizes bitops at compile-time as intended, but at
+ * the same time stops treating @bitmap and @bitopvar as compile-time
+ * constants after regular test_bit() is executed, thus triggering the
+ * build bugs below. So, call const_test_bit() there directly until
+ * the compiler is fixed.
+ */
+ bitmap_clear(bitmap, 0, BITS_PER_LONG);
+ if (!test_bit(7, bitmap))
+ bitmap_set(bitmap, 5, 2);
+
+ /* Equivalent to `unsigned long bitopvar = BIT(20)` */
+ __change_bit(31, &bitopvar);
+ bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);
+
+ /* Equivalent to `unsigned long var = BIT(25)` */
+ var |= BIT(25);
+ if (var & BIT(0))
+ var ^= GENMASK(9, 6);
+
+ /* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
+ res = bitmap_weight(bitmap, 20);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res != 2);
+
+ /* !(BIT(31) & BIT(18)) == 1 */
+ res = !test_bit(18, &bitopvar);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+
+ /* BIT(2) & GENMASK(14, 8) == 0 */
+ res = initvar & GENMASK(14, 8);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res);
+
+ /* ~BIT(25) */
+ BUILD_BUG_ON(!__builtin_constant_p(~var));
+ BUILD_BUG_ON(~var != ~BIT(25));
+}
+
static void __init selftest(void)
{
test_zero_clear();
test_fill_set();
test_copy();
+ test_bitmap_region();
+ test_replace();
test_bitmap_arr32();
+ test_bitmap_arr64();
+ test_bitmap_parse();
test_bitmap_parselist();
- test_bitmap_parselist_user();
+ test_bitmap_printlist();
test_mem_optimisations();
+ test_bitmap_cut();
+ test_bitmap_print_buf();
+ test_bitmap_const_eval();
+
+ test_find_nth_bit();
+ test_for_each_set_bit();
+ test_for_each_set_bit_from();
+ test_for_each_clear_bit();
+ test_for_each_clear_bit_from();
+ test_for_each_set_bitrange();
+ test_for_each_clear_bitrange();
+ test_for_each_set_bitrange_from();
+ test_for_each_clear_bitrange_from();
+ test_for_each_set_clump8();
+ test_for_each_set_bit_wrap();
}
KSTM_MODULE_LOADERS(test_bitmap);
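
The non-zero-offset case in test_bitmap_print_buf() above exists because bitmap_print_list_to_buf() is meant to be consumed in chunks when the rendered list exceeds one page; a minimal sketch of that pattern, mirroring the test (buffer and bitmap names are illustrative):

	/* first page of the rendered list ... */
	n = bitmap_print_list_to_buf(page, bitmap, nbits, 0, PAGE_SIZE);
	/* ... then the remainder, passing the bytes already consumed as the offset */
	n = bitmap_print_list_to_buf(page, bitmap, nbits, PAGE_SIZE, PAGE_SIZE);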
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
new file mode 100644
index 000000000000..3b7bcbee84db
--- /dev/null
+++ b/lib/test_bitops.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+/* a tiny module only meant to test
+ *
+ * set/clear_bit
+ * get_count_order/long
+ */
+
+/* use an enum because that's the most common BITMAP usage */
+enum bitops_fun {
+ BITOPS_4 = 4,
+ BITOPS_7 = 7,
+ BITOPS_11 = 11,
+ BITOPS_31 = 31,
+ BITOPS_88 = 88,
+ BITOPS_LAST = 255,
+ BITOPS_LENGTH = 256
+};
+
+static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH);
+
+static unsigned int order_comb[][2] = {
+ {0x00000003, 2},
+ {0x00000004, 2},
+ {0x00001fff, 13},
+ {0x00002000, 13},
+ {0x50000000, 31},
+ {0x80000000, 31},
+ {0x80003000, 32},
+};
+
+#ifdef CONFIG_64BIT
+static unsigned long order_comb_long[][2] = {
+ {0x0000000300000000, 34},
+ {0x0000000400000000, 34},
+ {0x00001fff00000000, 45},
+ {0x0000200000000000, 45},
+ {0x5000000000000000, 63},
+ {0x8000000000000000, 63},
+ {0x8000300000000000, 64},
+};
+#endif
+
+static int __init test_bitops_startup(void)
+{
+ int i, bit_set;
+
+ pr_info("Starting bitops test\n");
+ set_bit(BITOPS_4, g_bitmap);
+ set_bit(BITOPS_7, g_bitmap);
+ set_bit(BITOPS_11, g_bitmap);
+ set_bit(BITOPS_31, g_bitmap);
+ set_bit(BITOPS_88, g_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order(order_comb[i][0]))
+ pr_warn("get_count_order wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order_long(order_comb[i][0]))
+ pr_warn("get_count_order_long wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+#ifdef CONFIG_64BIT
+ for (i = 0; i < ARRAY_SIZE(order_comb_long); i++) {
+ if (order_comb_long[i][1] !=
+ get_count_order_long(order_comb_long[i][0]))
+ pr_warn("get_count_order_long wrong for %lx\n",
+ order_comb_long[i][0]);
+ }
+#endif
+
+ barrier();
+
+ clear_bit(BITOPS_4, g_bitmap);
+ clear_bit(BITOPS_7, g_bitmap);
+ clear_bit(BITOPS_11, g_bitmap);
+ clear_bit(BITOPS_31, g_bitmap);
+ clear_bit(BITOPS_88, g_bitmap);
+
+ bit_set = find_first_bit(g_bitmap, BITOPS_LAST);
+ if (bit_set != BITOPS_LAST)
+ pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+
+ pr_info("Completed bitops test\n");
+
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
+}
+
+module_init(test_bitops_startup);
+module_exit(test_bitops_unstartup);
+
+MODULE_AUTHOR("Jesse Brandeburg <jesse.brandeburg@intel.com>, Wei Yang <richard.weiyang@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bit testing module");
diff --git a/lib/test_bits.c b/lib/test_bits.c
new file mode 100644
index 000000000000..c9368a2314e7
--- /dev/null
+++ b/lib/test_bits.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+
+static void genmask_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+ KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+ KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK(0, 1);
+ GENMASK(0, 10);
+ GENMASK(9, 10);
+#endif
+
+
+}
+
+static void genmask_ull_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_ULL(0, 1);
+ GENMASK_ULL(0, 10);
+ GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+ unsigned int x, y;
+ int z, w;
+
+ /* Unknown input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+ /* Valid input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+}
+
+
+static struct kunit_case bits_test_cases[] = {
+ KUNIT_CASE(genmask_test),
+ KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_input_check_test),
+ {}
+};
+
+static struct kunit_suite bits_test_suite = {
+ .name = "bits-test",
+ .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_LICENSE("GPL");
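
The GENMASK() values checked above are typically consumed as register field masks; a minimal sketch (the register layout and names are hypothetical):

	/* Hypothetical 4-bit field occupying bits 7..4 of a status register. */
	#define STATUS_LEVEL_MASK	GENMASK(7, 4)	/* == 0xf0 */

	static unsigned int status_level(u32 reg)
	{
		return (reg & STATUS_LEVEL_MASK) >> 4;
	}

In new code the same mask-and-shift is usually written as FIELD_GET(STATUS_LEVEL_MASK, reg) from <linux/bitfield.h>.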
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
new file mode 100644
index 000000000000..4c40580a99a3
--- /dev/null
+++ b/lib/test_blackhole_dev.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This module tests the blackhole_dev that is created during the
+ * net subsystem initialization. The test injects an skb into the
+ * stack with skb->dev set to the blackhole_dev and expects the
+ * kernel to behave in a sane manner (in other words, *not crash*)!
+ *
+ * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+
+#include <net/dst.h>
+
+#define SKB_SIZE 256
+#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
+#define TAIL_SIZE 32 /* random tail-room */
+
+#define UDP_PORT 1234
+
+static int __init test_blackholedev_init(void)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ struct udphdr *uh;
+ int data_len;
+ int ret;
+
+ skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Reserve head-room for the headers */
+ skb_reserve(skb, HEAD_SIZE);
+
+ /* Add data to the skb */
+ data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
+ memset(__skb_put(skb, data_len), 0xf, data_len);
+
+ /* Add protocol data */
+ /* (Transport) UDP */
+ uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
+ skb_set_transport_header(skb, 0);
+ uh->source = uh->dest = htons(UDP_PORT);
+ uh->len = htons(data_len);
+ uh->check = 0;
+ /* (Network) IPv6 */
+ ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
+ skb_set_network_header(skb, 0);
+ ip6h->hop_limit = 32;
+ ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->saddr = in6addr_loopback;
+ ip6h->daddr = in6addr_loopback;
+ /* Ether */
+ ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_set_mac_header(skb, 0);
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = blackhole_netdev;
+
+ /* Now attempt to send the packet */
+ ret = dev_queue_xmit(skb);
+
+ switch (ret) {
+ case NET_XMIT_SUCCESS:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
+ break;
+ case NET_XMIT_DROP:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
+ break;
+ case NET_XMIT_CN:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
+ break;
+ default:
+ pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
+ }
+
+ return 0;
+}
+
+static void __exit test_blackholedev_exit(void)
+{
+ pr_warn("test_blackholedev module terminating.\n");
+}
+
+module_init(test_blackholedev_init);
+module_exit(test_blackholedev_exit);
+
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c41705835cba..569e6d2dc55c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -52,6 +52,8 @@
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
+#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -80,6 +82,7 @@ struct bpf_test {
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
+ int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};
/* Large test cases need separate allocation and fill handler. */
@@ -345,7 +348,7 @@ static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
static int bpf_fill_maxinsns11(struct bpf_test *self)
{
- /* Hits 70 passes on x86_64, so cannot get JITed there. */
+ /* Hits 70 passes on x86_64 and triggers NOPs padding. */
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
@@ -461,6 +464,2602 @@ static int bpf_fill_stxdw(struct bpf_test *self)
return __bpf_fill_stxdw(self, BPF_DW);
}
+static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
+{
+ struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
+
+ memcpy(insns, tmp, sizeof(tmp));
+ return 2;
+}
+
+/*
+ * Branch conversion tests. Complex operations can expand to a lot
+ * of instructions when JITed. This in turn may cause jump offsets
+ * to overflow the field size of the native instruction, triggering
+ * a branch conversion mechanism in some JITs.
+ */
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+{
+ struct bpf_insn *insns;
+ int len = S16_MAX + 5;
+ int i;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
+ insns[i++] = BPF_EXIT_INSN();
+
+ while (i < len - 1) {
+ static const int ops[] = {
+ BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
+ BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
+ };
+ int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
+
+ if (i & 1)
+ insns[i++] = BPF_ALU32_REG(op, R0, R1);
+ else
+ insns[i++] = BPF_ALU64_REG(op, R0, R1);
+ }
+
+ insns[i++] = BPF_EXIT_INSN();
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
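+
+/*
+ * Illustrative note on __bpf_fill_max_jmp() above: the program is
+ * len = S16_MAX + 5 = 32772 insns long, and the conditional jump emitted
+ * at index 3 with offset S16_MAX lands on index 3 + 1 + 32767 = 32771,
+ * i.e. the final BPF_EXIT_INSN(). The ~32K ALU filler instructions in
+ * between are what may expand under JIT and overflow a narrow native
+ * branch offset field.
+ */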
+
+/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+}
+
+/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+}
+
+/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+}
+
+/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+}
+
+/* ALU result computation used in tests */
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+ *res = 0;
+ switch (op) {
+ case BPF_MOV:
+ *res = v2;
+ break;
+ case BPF_AND:
+ *res = v1 & v2;
+ break;
+ case BPF_OR:
+ *res = v1 | v2;
+ break;
+ case BPF_XOR:
+ *res = v1 ^ v2;
+ break;
+ case BPF_LSH:
+ *res = v1 << v2;
+ break;
+ case BPF_RSH:
+ *res = v1 >> v2;
+ break;
+ case BPF_ARSH:
+ *res = v1 >> v2;
+ if (v2 > 0 && v1 > S64_MAX)
+ *res |= ~0ULL << (64 - v2);
+ break;
+ case BPF_ADD:
+ *res = v1 + v2;
+ break;
+ case BPF_SUB:
+ *res = v1 - v2;
+ break;
+ case BPF_MUL:
+ *res = v1 * v2;
+ break;
+ case BPF_DIV:
+ if (v2 == 0)
+ return false;
+ *res = div64_u64(v1, v2);
+ break;
+ case BPF_MOD:
+ if (v2 == 0)
+ return false;
+ div64_u64_rem(v1, v2, res);
+ break;
+ }
+ return true;
+}
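+
+/*
+ * Illustrative note on the BPF_ARSH case above: the arithmetic shift is
+ * emulated from the logical one, e.g. v1 = 0xffffffff00000000, v2 = 8
+ * gives v1 >> v2 = 0x00ffffffff000000, and OR-ing in ~0ULL << (64 - 8)
+ * restores the sign bits, yielding 0xffffffffff000000.
+ */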
+
+/* Test an ALU shift operation for all valid shift values */
+static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
+ u8 mode, bool alu32)
+{
+ static const s64 regs[] = {
+ 0x0123456789abcdefLL, /* dword > 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
+ 0x0123458967abcdefLL, /* dword > 0, word > 0 */
+ };
+ int bits = alu32 ? 32 : 64;
+ int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
+ struct bpf_insn *insn;
+ int imm, k;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (k = 0; k < ARRAY_SIZE(regs); k++) {
+ s64 reg = regs[k];
+
+ i += __bpf_ld_imm64(&insn[i], R3, reg);
+
+ for (imm = 0; imm < bits; imm++) {
+ u64 val;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
+ if (alu32) {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU32_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU32_REG(op, R1, R2);
+
+ if (op == BPF_ARSH)
+ reg = (s32)reg;
+ else
+ reg = (u32)reg;
+ __bpf_alu_result(&val, reg, imm, op);
+ val = (u32)val;
+ } else {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU64_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R2);
+ __bpf_alu_result(&val, reg, imm, op);
+ }
+
+ /*
+ * When debugging a JIT that fails this test, one
+ * can write the immediate value to R0 here to find
+ * out which operand values fail.
+ */
+
+ /* Load reference and check the result */
+ i += __bpf_ld_imm64(&insn[i], R4, val);
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
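+
+/*
+ * Illustrative note on __bpf_fill_alu_shift() above: each test register
+ * costs 2 insns for its BPF_LD_IMM64 plus 7 insns per shift value (two
+ * moves, the shift, a 2-insn reference load, a compare and an exit),
+ * which is where len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3 comes from;
+ * the trailing 3 are the initial R0 clear and the success epilogue.
+ */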
+
+static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+}
+
+static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+}
+
+/*
+ * Test an ALU register shift operation for all valid shift values
+ * for the case when the source and destination are the same.
+ */
+static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
+ bool alu32)
+{
+ int bits = alu32 ? 32 : 64;
+ int len = 3 + 6 * bits;
+ struct bpf_insn *insn;
+ int i = 0;
+ u64 val;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (val = 0; val < bits; val++) {
+ u64 res;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
+ if (alu32)
+ insn[i++] = BPF_ALU32_REG(op, R1, R1);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R1);
+
+ /* Compute the reference result */
+ __bpf_alu_result(&res, val, val, op);
+ if (alu32)
+ res = (u32)res;
+ i += __bpf_ld_imm64(&insn[i], R2, res);
+
+ /* Check the actual result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+}
+
+/*
+ * Common operand pattern generator for exhaustive power-of-two magnitudes
+ * tests. The block size parameters can be adjusted to increase/reduce the
+ * number of combinations tested and thereby execution speed and memory
+ * footprint.
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+ return sign * (1LL << msb) + delta;
+}
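+
+/*
+ * Illustrative note: value(msb, delta, sign) above evaluates to
+ * sign * (1LL << msb) + delta, e.g. value(3, 1, -1) == -7 and
+ * value(31, 0, 1) == 0x80000000, producing operands clustered around
+ * every power-of-two magnitude.
+ */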
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+ int dbits, int sbits, int block1, int block2,
+ int (*emit)(struct bpf_test*, void*,
+ struct bpf_insn*, s64, s64))
+{
+ static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+ struct bpf_insn *insns;
+ int di, si, bt, db, sb;
+ int count, len, k;
+ int extra = 1 + 2;
+ int i = 0;
+
+ /* Total number of iterations for the two patterns */
+ count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+ count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+ /* Compute the maximum number of insns and allocate the buffer */
+ len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Add head instruction(s) */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ /*
+ * Pattern 1: all combinations of power-of-two magnitudes and sign,
+ * and with a block of contiguous values around each magnitude.
+ */
+ for (di = 0; di < dbits - 1; di++) /* Dst magnitudes */
+ for (si = 0; si < sbits - 1; si++) /* Src magnitudes */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block1 / 2);
+ db < (block1 + 1) / 2; db++)
+ for (sb = -(block1 / 2);
+ sb < (block1 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(di, db, sgn[k][0]);
+ src = value(si, sb, sgn[k][1]);
+ i += (*emit)(self, arg,
+ &insns[i],
+ dst, src);
+ }
+ /*
+ * Pattern 2: all combinations for a larger block of values
+ * for each power-of-two magnitude and sign, where the magnitude is
+ * the same for both operands.
+ */
+ for (bt = 0; bt < max(dbits, sbits) - 1; bt++) /* Magnitude */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+ for (sb = -(block2 / 2);
+ sb < (block2 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(bt % dbits, db, sgn[k][0]);
+ src = value(bt % sbits, sb, sgn[k][1]);
+ i += (*emit)(self, arg, &insns[i],
+ dst, src);
+ }
+
+ /* Append tail instructions */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ BUG_ON(i > len);
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = i;
+
+ return 0;
+}
+
+/*
+ * Block size parameters used in pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested, see the following examples.
+ * block values per operand MSB
+ * ----------------------------------------
+ * 0 none
+ * 1 (1 << MSB)
+ * 2 (1 << MSB) + [-1, 0]
+ * 3 (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
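+
+/*
+ * Illustrative note: with PATTERN_BLOCK2 == 5, the inner loops in
+ * __bpf_fill_pattern() sweep db and sb over [-2, 2], so each operand
+ * takes the values (1 << MSB) + {-2, -1, 0, 1, 2} for every MSB position.
+ */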
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, dst, src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_imm);
+}
+
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_imm);
+}
+
+static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_reg);
+}
+
+static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_reg);
+}
+
+/* ALU64 immediate operations */
+static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOD);
+}
+
+/* ALU32 immediate operations */
+static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOD);
+}
+
+/* ALU64 register operations */
+static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOD);
+}
+
+/* ALU32 register operations */
+static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOD);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10;
+ struct bpf_insn *insns;
+ u64 dst, res;
+ int i = 0;
+ u32 imm;
+ int rd;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32)
+ dst = 0x76543210U;
+ else
+ dst = 0x7edcba9876543210ULL;
+ imm = 0x01234567U;
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ imm &= 31;
+
+ __bpf_alu_result(&res, dst, imm, op);
+
+ if (alu32)
+ res = (u32)res;
+
+ /* Check all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+ else
+ insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
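+
+/*
+ * Illustrative note on __bpf_fill_alu_imm_regs() above: on a mismatch the
+ * program exits with R0 = __LINE__, so a failing run reports the source
+ * line of the offending check as its return value; each result is checked
+ * in two halves, a JMP32 test of the low 32 bits followed by a BPF_RSH by
+ * 32 and a test of the high bits.
+ */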
+
+/* ALU64 K registers */
+static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+}
+
+/* ALU32 K registers */
+static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10 * 12;
+ u64 dst, src, res, same;
+ struct bpf_insn *insns;
+ int rd, rs;
+ int i = 0;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32) {
+ dst = 0x76543210U;
+ src = 0x01234567U;
+ } else {
+ dst = 0x7edcba9876543210ULL;
+ src = 0x0123456789abcdefULL;
+ }
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ src &= 31;
+
+ __bpf_alu_result(&res, dst, src, op);
+ __bpf_alu_result(&same, src, src, op);
+
+ if (alu32) {
+ res = (u32)res;
+ same = (u32)same;
+ }
+
+ /* Check all combinations of operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 val = rd == rs ? same : res;
+
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+ i += __bpf_ld_imm64(&insns[i], rs, src);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_REG(op, rd, rs);
+ else
+ insns[i++] = BPF_ALU64_REG(op, rd, rs);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* ALU64 X register combinations */
+static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
+}
+
+/* ALU32 X register combinations */
+static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
+}
+
+/*
+ * Exhaustive tests of atomic operations for all power-of-two operand
+ * magnitudes, both for positive and negative values.
+ */
+
+static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, dst, src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
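+
+/*
+ * Illustrative note on __bpf_emit_atomic64() above: each emitted case
+ * stores dst at R10-8, applies the atomic op with src, and then checks
+ * three things: the value left in memory (against R3), the value left in
+ * the source register (against R4, the fetched old value when BPF_FETCH
+ * is set), and that R0 still holds its canary value (against R5).
+ */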
+
+static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = (u32)dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
+ insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 23;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 27;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ i += __bpf_ld_imm64(&insns[i], R0, dst);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_fill_atomic64(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic64);
+}
+
+static int __bpf_fill_atomic32(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic32);
+}
+
+/* 64-bit atomic operations */
+static int bpf_fill_atomic64_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg64(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg64);
+}
+
+/* 32-bit atomic operations */
+static int bpf_fill_atomic32_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg32(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg32);
+}
+
+/*
+ * Test JITs that implement ATOMIC operations as function calls or
+ * other primitives, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
+{
+ struct bpf_insn *insn;
+ int len = 2 + 34 * 10 * 10;
+ u64 mem, upd, res;
+ int rd, rs, i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ /* Operand and memory values */
+ if (width == BPF_DW) {
+ mem = 0x0123456789abcdefULL;
+ upd = 0xfedcba9876543210ULL;
+ } else { /* BPF_W */
+ mem = 0x01234567U;
+ upd = 0x76543210U;
+ }
+
+ /* Memory updated according to operation */
+ switch (op) {
+ case BPF_XCHG:
+ res = upd;
+ break;
+ case BPF_CMPXCHG:
+ res = mem;
+ break;
+ default:
+ __bpf_alu_result(&res, mem, upd, BPF_OP(op));
+ }
+
+ /* Test all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 cmp, src;
+
+ /* Initialize value in memory */
+ i += __bpf_ld_imm64(&insn[i], R0, mem);
+ insn[i++] = BPF_STX_MEM(width, R10, R0, -8);
+
+ /* Initialize registers in order */
+ i += __bpf_ld_imm64(&insn[i], R0, ~mem);
+ i += __bpf_ld_imm64(&insn[i], rs, upd);
+ insn[i++] = BPF_MOV64_REG(rd, R10);
+
+ /* Perform atomic operation */
+ insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
+ if (op == BPF_CMPXCHG && width == BPF_W)
+ insn[i++] = BPF_ZEXT_REG(R0);
+
+ /* Check R0 register value */
+ if (op == BPF_CMPXCHG)
+ cmp = mem; /* Expect value from memory */
+ else if (R0 == rd || R0 == rs)
+ cmp = 0; /* Aliased, checked below */
+ else
+ cmp = ~mem; /* Expect value to be preserved */
+ if (cmp) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ (u32)cmp, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ cmp >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check source register value */
+ if (rs == R0 && op == BPF_CMPXCHG)
+ src = 0; /* Aliased with R0, checked above */
+ else if (rs == rd && (op == BPF_CMPXCHG ||
+ !(op & BPF_FETCH)))
+ src = 0; /* Aliased with rd, checked below */
+ else if (op == BPF_CMPXCHG)
+ src = upd; /* Expect value to be preserved */
+ else if (op & BPF_FETCH)
+ src = mem; /* Expect fetched value from mem */
+ else /* no fetch */
+ src = upd; /* Expect value to be preserved */
+ if (src) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ (u32)src, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ src >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check destination register value */
+ if (!(rd == R0 && op == BPF_CMPXCHG) &&
+ !(rd == rs && (op & BPF_FETCH))) {
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check value in memory */
+ if (rs != rd) { /* No aliasing */
+ i += __bpf_ld_imm64(&insn[i], R1, res);
+ } else if (op == BPF_XCHG) { /* Aliased, XCHG */
+ insn[i++] = BPF_MOV64_REG(R1, R10);
+ } else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ } else { /* Aliased, ALU oper */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
+ }
+
+ insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
+ if (width == BPF_DW)
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ else /* width == BPF_W */
+ insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_MOV64_IMM(R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = i;
+ BUG_ON(i > len);
+
+ return 0;
+}
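+
+/*
+ * Illustrative note on __bpf_fill_atomic_reg_pairs() above: all 10 x 10
+ * (rd, rs) register pairs are exercised with at most 34 instructions
+ * emitted per pair, which is the bound behind the len = 2 + 34 * 10 * 10
+ * allocation; aliased pairs (rd == rs, or R0 overlapping an operand)
+ * skip the checks that aliasing makes meaningless.
+ */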
+
+/* 64-bit atomic register tests */
+static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
+}
+
+static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
+}
+
+/* 32-bit atomic register tests */
+static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
+}
+
+static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
+}
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for all
+ * power-of-two magnitudes of the immediate operand. For each MSB, a block
+ * of immediate values centered around the power-of-two MSB is tested,
+ * both for positive and negative values. The test is designed to verify
+ * the operation for JITs that emit different code depending on the magnitude
+ * of the immediate value. This is often the case if the native instruction
+ * immediate field width is narrower than 32 bits.
+ */
+static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
+{
+ int block = 64; /* Increase for more tests per MSB position */
+ int len = 3 + 8 * 63 * block * 2;
+ struct bpf_insn *insn;
+ int bit, adj, sign;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (bit = 0; bit <= 62; bit++) {
+ for (adj = -block / 2; adj < block / 2; adj++) {
+ for (sign = -1; sign <= 1; sign += 2) {
+ s64 imm = sign * ((1LL << bit) + adj);
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
+ (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
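+
+/*
+ * Illustrative note on bpf_fill_ld_imm64_magn() above: each (bit, adj,
+ * sign) case costs exactly 8 instructions: 2 for the BPF_LD_IMM64 under
+ * test, 4 to rebuild the 64-bit reference from two 32-bit moves, a shift
+ * and an OR, and 2 for the compare-and-exit check, which is where the
+ * len = 3 + 8 * 63 * block * 2 allocation comes from.
+ */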
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for different
+ * combinations of bytes. Each byte in the 64-bit word is constructed as
+ * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
+ * All combinations of (base1, mask1) and (base2, mask2) bytes are tested.
+ */
+static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
+ u8 base1, u8 mask1,
+ u8 base2, u8 mask2)
+{
+ struct bpf_insn *insn;
+ int len = 3 + 8 * BIT(8);
+ int pattern, index;
+ u32 rand = 1;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (pattern = 0; pattern < BIT(8); pattern++) {
+ u64 imm = 0;
+
+ for (index = 0; index < 8; index++) {
+ int byte;
+
+ if (pattern & BIT(index))
+ byte = (base1 & mask1) | (rand & ~mask1);
+ else
+ byte = (base2 & mask2) | (rand & ~mask2);
+ imm = (imm << 8) | byte;
+ }
+
+ /* Update our LCG */
+ rand = rand * 1664525 + 1013904223;
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
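+
+/*
+ * Illustrative note on __bpf_fill_ld_imm64_bytes() above: the
+ * "deterministic LCG" is the classic Numerical Recipes generator
+ * rand = rand * 1664525 + 1013904223 (mod 2^32), seeded with 1, so every
+ * run exercises the same 256 byte patterns.
+ */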
+
+static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
+}
+
+static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
+}
+
+static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
+}
+
+static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
+}
+
+/*
+ * Exhaustive tests of JMP operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
+{
+ switch (op) {
+ case BPF_JSET:
+ return !!(v1 & v2);
+ case BPF_JEQ:
+ return v1 == v2;
+ case BPF_JNE:
+ return v1 != v2;
+ case BPF_JGT:
+ return (u64)v1 > (u64)v2;
+ case BPF_JGE:
+ return (u64)v1 >= (u64)v2;
+ case BPF_JLT:
+ return (u64)v1 < (u64)v2;
+ case BPF_JLE:
+ return (u64)v1 <= (u64)v2;
+ case BPF_JSGT:
+ return v1 > v2;
+ case BPF_JSGE:
+ return v1 >= v2;
+ case BPF_JSLT:
+ return v1 < v2;
+ case BPF_JSLE:
+ return v1 <= v2;
+ }
+ return false;
+}
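+
+/*
+ * Illustrative note: in the helper above the unsigned conditions cast to
+ * u64 while the signed ones compare the s64 values directly, so for
+ * v1 = -1 and v2 = 1, BPF_JGT is true ((u64)-1 is the largest u64) while
+ * BPF_JSGT is false.
+ */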
+
+static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
+ int i = 0;
+
+ insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 5 + 1;
+}
+
+static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 5;
+}
+
+static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_imm);
+}
+
+static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_imm);
+}
+
+static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_reg);
+}
+
+static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_reg);
+}
+
+/* JMP immediate tests */
+static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLE);
+}
+
+/* JMP32 immediate tests */
+static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLE);
+}
+
+/* JMP register tests */
+static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLE);
+}
+
+/* JMP32 register tests */
+static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLE);
+}
+
+/*
+ * Set up a sequence of staggered jumps, forwards and backwards with
+ * increasing offset. This tests the conversion of relative jumps to
+ * JITed native jumps. On some architectures, for example MIPS, a large
+ * PC-relative jump offset may overflow the immediate field of the native
+ * conditional branch instruction, triggering a conversion to use an
+ * absolute jump instead. Since this changes the jump offsets, another
+ * offset computation pass is necessary, and that may in turn trigger
+ * another branch conversion. This jump sequence is particularly nasty
+ * in that regard.
+ *
+ * The sequence generation is parameterized by size and jump type.
+ * The size must be even, and the expected result is always size + 1.
+ * Below is an example with size=8 and result=9.
+ *
+ * ________________________Start
+ * R0 = 0
+ * R1 = r1
+ * R2 = r2
+ * ,------- JMP +4 * 3______________Preamble: 4 insns
+ * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
+ * | | R0 = 8 |
+ * | | JMP +7 * 3 ------------------------.
+ * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
+ * | | | R0 = 6 | | |
+ * | | | JMP +5 * 3 ------------------. | |
+ * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
+ * | | | | R0 = 4 | | | | |
+ * | | | | JMP +3 * 3 ------------. | | | |
+ * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
+ * | | | | | R0 = 2 | | | | | | |
+ * | | | | | JMP +1 * 3 ------. | | | | | |
+ * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
+ * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
+ * | | | | | JMP -2 * 3 ---' | | | | | | |
+ * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
+ * | | | | | | R0 = 3 | | | | | |
+ * | | | | | | JMP -4 * 3 ---------' | | | | |
+ * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
+ * | | | | | | | R0 = 5 | | | |
+ * | | | | | | | JMP -6 * 3 ---------------' | | |
+ * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
+ * | | | | | | | | R0 = 7 | |
+ * | | Error | | | JMP -8 * 3 ---------------------' |
+ * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
+ * | | | | | | | | | R0 = 9__________________Sequence: 3 * (size + 1) - 1 insns
+ * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
+ *
+ */
+
+/* The maximum size parameter: the largest emitted branch offset is about 3 * size insns and must fit the signed 16-bit jump offset field, hence 0x7fff / 3, rounded down to an even value */
+#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
+
+/* We use a reduced number of iterations to get a reasonable execution time */
+#define NR_STAGGERED_JMP_RUNS 10
+
+static int __bpf_fill_staggered_jumps(struct bpf_test *self,
+ const struct bpf_insn *jmp,
+ u64 r1, u64 r2)
+{
+ int size = self->test[0].result - 1;
+ int len = 4 + 3 * (size + 1);
+ struct bpf_insn *insns;
+ int off, ind;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Preamble */
+ insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+ insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
+ insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
+ insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);
+
+ /* Sequence */
+ for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
+ struct bpf_insn *ins = &insns[4 + 3 * ind];
+ int loc;
+
+ if (off == 0)
+ off--;
+
+ loc = abs(off);
+ ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
+ 3 * (size - ind) + 1);
+ ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
+ ins[2] = *jmp;
+ ins[2].off = 3 * (off - 1);
+ }
+
+ /* Return */
+ insns[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+
+ return 0;
+}
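For reference, a minimal worked instance of the scheme described above, with size = 2 (expected result 3) and an unconditional BPF_JA as the jump template. This listing is illustrative only and is not emitted verbatim anywhere in the patch; it simply spells out what __bpf_fill_staggered_jumps() builds, with comments tracing the execution order (r1 and r2 stand for the operand values passed in by the caller):

    insns[0]  = BPF_ALU64_IMM(BPF_MOV, R0, 0);   /* preamble */
    insns[1]  = BPF_ALU64_IMM(BPF_MOV, R1, r1);
    insns[2]  = BPF_ALU64_IMM(BPF_MOV, R2, r2);
    insns[3]  = BPF_JMP_IMM(BPF_JA, 0, 0, 3);     /* enter sequence at insn 7 */
    insns[4]  = BPF_JMP_IMM(BPF_JNE, R0, 1, 7);   /* 2nd group: R0 == 1, fall through */
    insns[5]  = BPF_ALU64_IMM(BPF_MOV, R0, 2);
    insns[6]  = BPF_JMP_IMM(BPF_JA, 0, 0, 3);     /* forward to insn 10 */
    insns[7]  = BPF_JMP_IMM(BPF_JNE, R0, 0, 4);   /* 1st group: R0 == 0, fall through */
    insns[8]  = BPF_ALU64_IMM(BPF_MOV, R0, 1);
    insns[9]  = BPF_JMP_IMM(BPF_JA, 0, 0, -6);    /* backward to insn 4 */
    insns[10] = BPF_JMP_IMM(BPF_JNE, R0, 2, 1);   /* 3rd group: R0 == 2, fall through */
    insns[11] = BPF_ALU64_IMM(BPF_MOV, R0, 3);
    insns[12] = BPF_EXIT_INSN();                  /* returns 3 == size + 1 */

All three JNE error paths (offsets 7, 4 and 1) land directly on the EXIT at insn 12, so a wrong value of R0 is returned as-is, which is how a bad jump conversion shows up as a wrong test result.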
+
+/* 64-bit unconditional jump */
+static int bpf_fill_staggered_ja(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
+}
+
+/* 64-bit immediate jumps */
+static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 64-bit register jumps */
+static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+/* 32-bit immediate jumps */
+static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 32-bit register jumps */
+static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+
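The fill helpers above are referenced from entries in the tests[] table that follows. As a rough sketch of how one of them is wired up (the field layout follows struct bpf_test as used elsewhere in this file; the test name string, FLAG_NO_DATA and .nr_testruns are assumptions here, and the exact entries added further down in this patch may differ):

    {
        "Staggered jumps: JMP_JA",
        { },
        INTERNAL | FLAG_NO_DATA,
        { },
        { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },  /* expected result sets the size */
        .fill_helper = bpf_fill_staggered_ja,   /* builds the program at test time */
        .nr_testruns = NR_STAGGERED_JMP_RUNS,
    },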
static struct bpf_test tests[] = {
{
"TAX",
@@ -867,7 +3466,7 @@ static struct bpf_test tests[] = {
},
CLASSIC,
{ },
- { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
+ { { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
},
{
"SPILL_FILL",
@@ -1095,7 +3694,7 @@ static struct bpf_test tests[] = {
{
"RET_A",
.u.insns = {
- /* check that unitialized X and A contain zeros */
+ /* check that uninitialized X and A contain zeros */
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
@@ -1916,6 +4515,22 @@ static struct bpf_test tests[] = {
{ },
{ { 0, -1 } }
},
+#ifdef CONFIG_32BIT
+ {
+ "INT: 32-bit context pointer word order and zero-extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+#endif
{
"check: missing ret",
.u.insns = {
@@ -2361,6 +4976,48 @@ static struct bpf_test tests[] = {
{ { 0, 0x1 } },
},
{
+ "ALU_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "ALU_MOV_K: small negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU_MOV_K: large negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
"ALU64_MOV_K: dst = 2",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 2),
@@ -2412,6 +5069,130 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "ALU64_MOV_K: small negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
+ {
+ "ALU64_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU64_MOV_K: large negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
+ /* MOVSX32 */
+ {
+ "ALU_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* MOVSX64 REG */
+ {
+ "ALU64_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 32),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@@ -2967,6 +5748,31 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2147483647 } },
},
+ {
+ "ALU64_MUL_X: 64x64 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe5618cf0 } }
+ },
+ {
+ "ALU64_MUL_X: 64x64 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x2236d88f } }
+ },
/* BPF_ALU | BPF_MUL | BPF_K */
{
"ALU_MUL_K: 2 * 3 = 6",
@@ -3077,6 +5883,29 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_MUL_K: 64x32 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe242d208 } }
+ },
+ {
+ "ALU64_MUL_K: 64x32 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xc28f5c28 } }
+ },
/* BPF_ALU | BPF_DIV | BPF_X */
{
"ALU_DIV_X: 6 / 2 = 3",
@@ -3358,6 +6187,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2 } },
},
+ /* BPF_ALU | BPF_DIV | BPF_X off=1 (SDIV) */
+ {
+ "ALU_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_K off=1 (SDIV) */
+ {
+ "ALU_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_X off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_K off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_X off=1 (SMOD) */
+ {
+ "ALU_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_K off=1 (SMOD) */
+ {
+ "ALU_SMOD_K: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_X off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_K: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
@@ -3431,6 +6360,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
+ "ALU_AND_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_AND, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4 } }
+ },
+ {
+ "ALU_AND_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_AND_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_AND_K: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
@@ -3453,7 +6420,7 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
- "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
+ "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000000000000000LL),
@@ -3469,7 +6436,7 @@ static struct bpf_test tests[] = {
{ { 0, 0x1 } },
},
{
- "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
+ "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
@@ -3500,6 +6467,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_AND_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_AND_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_OR | BPF_X */
{
"ALU_OR_X: 1 | 2 = 3",
@@ -3573,6 +6572,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
+ "ALU_OR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01020305 } }
+ },
+ {
+ "ALU_OR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_OR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_OR_K: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3595,7 +6632,7 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
- "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
+ "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
@@ -3642,6 +6679,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_OR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_OR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_XOR | BPF_X */
{
"ALU_XOR_X: 5 ^ 6 = 3",
@@ -3715,6 +6784,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xfffffffe } },
},
{
+ "ALU_XOR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_XOR, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x0102030b } }
+ },
+ {
+ "ALU_XOR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x5e4d3c2b } }
+ },
+ {
+ "ALU_XOR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_XOR_K: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
@@ -3726,7 +6833,7 @@ static struct bpf_test tests[] = {
{ { 0, 3 } },
},
{
- "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
+ "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
@@ -3784,6 +6891,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_XOR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_XOR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_LSH | BPF_X */
{
"ALU_LSH_X: 1 << 1 = 2",
@@ -3810,6 +6949,18 @@ static struct bpf_test tests[] = {
{ { 0, 0x80000000 } },
},
{
+ "ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
"ALU64_LSH_X: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3833,6 +6984,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x80000000 } },
},
+ {
+ "ALU64_LSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } }
+ },
/* BPF_ALU | BPF_LSH | BPF_K */
{
"ALU_LSH_K: 1 << 1 = 2",
@@ -3857,6 +7108,28 @@ static struct bpf_test tests[] = {
{ { 0, 0x80000000 } },
},
{
+ "ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
+ "ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
"ALU64_LSH_K: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3878,6 +7151,86 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x80000000 } },
},
+ {
+ "ALU64_LSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_RSH | BPF_X */
{
"ALU_RSH_X: 2 >> 1 = 1",
@@ -3904,6 +7257,18 @@ static struct bpf_test tests[] = {
{ { 0, 1 } },
},
{
+ "ALU_RSH_X: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 20),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
"ALU64_RSH_X: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
@@ -3927,6 +7292,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "ALU64_RSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
/* BPF_ALU | BPF_RSH | BPF_K */
{
"ALU_RSH_K: 2 >> 1 = 1",
@@ -3951,6 +7416,28 @@ static struct bpf_test tests[] = {
{ { 0, 1 } },
},
{
+ "ALU_RSH_K: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 20),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
+ "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
"ALU64_RSH_K: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
@@ -3972,9 +7459,101 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "ALU64_RSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_ARSH | BPF_X */
{
- "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ "ALU32_ARSH_X: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_ALU32_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU32_IMM(BPF_MOV, R1, 40),
@@ -3985,9 +7564,131 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffff00ff } },
},
+ {
+ "ALU64_ARSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
/* BPF_ALU | BPF_ARSH | BPF_K */
{
- "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ "ALU32_ARSH_K: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 7),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU32_ARSH_K: -1234 >> 0 = -1234",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1234 } }
+ },
+ {
+ "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU64_IMM(BPF_ARSH, R0, 40),
@@ -3997,6 +7698,86 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffff00ff } },
},
+ {
+ "ALU64_ARSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_NEG */
{
"ALU_NEG: -(3) = -3",
@@ -4079,6 +7860,67 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
+ {
+ "ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
+ },
/* BPF_ALU | BPF_END | BPF_FROM_LE */
{
"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
@@ -4116,6 +7958,750 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
+ {
+ "ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
+ },
+ /* BSWAP */
+ {
+ "BSWAP 16: 0x0123456789abcdef -> 0xefcd",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcd } },
+ },
+ {
+ "BSWAP 32: 0x0123456789abcdef -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef -> 0x67452301",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x67452301 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ /* BSWAP, reversed */
+ {
+ "BSWAP 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1032 } },
+ },
+ {
+ "BSWAP 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x98badcfe } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 >> 32 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
+ /* BPF_LDX_MEM B/H/W/DW */
+ {
+ "BPF_LDX_MEM | BPF_B, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ /* BPF_LDX_MEMSX B/H/W */
+ {
+ "BPF_LDX_MEMSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead0000000000f0ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffffff0ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead00000000f123ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffff123ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x00000000deadbeefULL),
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* BPF_STX_MEM B/H/W/DW */
+ {
+ "BPF_STX_MEM | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
{
"ST_MEM_B: Store/Load byte: max negative",
@@ -4286,8 +8872,8 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
- BPF_STX_MEM(BPF_W, R10, R1, -40),
- BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4295,80 +8881,358 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
- /* BPF_STX | BPF_XADD | BPF_W/DW */
{
- "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
+ "STX_MEM_DW: Store double word: first word in memory",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x22 } },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x01234567 } },
+#else
+ { { 0, 0x89abcdef } },
+#endif
.stack_depth = 40,
},
{
- "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ "STX_MEM_DW: Store double word: second word in memory",
.u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R1, R10),
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
- BPF_ALU64_REG(BPF_MOV, R0, R10),
- BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -36),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0 } },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x89abcdef } },
+#else
+ { { 0, 0x01234567 } },
+#endif
.stack_depth = 40,
},
+ /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
{
- "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
+ /*
+ * Exhaustive tests of atomic operation variants.
+ * Individual tests are expanded from template macros for all
+ * combinations of ALU operation, word size and fetching.
+ */
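+/*
+ * BPF_ATOMIC_POISON() marks the upper half of the source operand for
+ * BPF_W tests with a recognizable pattern, so that an implementation
+ * which operates on the full 64-bit value instead of the low 32 bits
+ * can be detected.
+ */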
+#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0)
+
+#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R5, -40), \
+ BPF_LDX_MEM(width, R0, R10, -40), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, result } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R1, R10), \
+ BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(BPF_W, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R0, -40), \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -40), \
+ BPF_ALU64_REG(BPF_SUB, R0, R10), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R3, -40), \
+ BPF_ALU32_REG(BPF_MOV, R0, R3), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, (op) & BPF_FETCH ? old : update } }, \
+ .stack_depth = 40, \
+}
+ /* BPF_ATOMIC | BPF_W: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ /* BPF_ATOMIC | BPF_DW: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+#undef BPF_ATOMIC_POISON
+#undef BPF_ATOMIC_OP_TEST1
+#undef BPF_ATOMIC_OP_TEST2
+#undef BPF_ATOMIC_OP_TEST3
+#undef BPF_ATOMIC_OP_TEST4
+ /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
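+	/*
+	 * BPF_CMPXCHG compares R0 with the memory location; on a match the
+	 * source register is stored, and in either case the previous memory
+	 * value is returned in R0. The tests below check both the returned
+	 * value and the memory contents for the matching and non-matching
+	 * cases.
+	 */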
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x12 } },
+ { { 0, 0x89abcdef } },
.stack_depth = 40,
},
{
- "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
{ },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
INTERNAL,
{ },
- { { 0, 4134 } },
- .fill_helper = bpf_fill_stxw,
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
},
{
- "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ALU32_REG(BPF_MOV, R0, R3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } },
+ .stack_depth = 40,
+ },
+ /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x22 } },
+ { { 0, 0 } },
.stack_depth = 40,
},
{
- "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R1, R10),
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
- BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
@@ -4378,25 +9242,552 @@ static struct bpf_test tests[] = {
.stack_depth = 40,
},
{
- "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x12 } },
+ { { 0, 0 } },
.stack_depth = 40,
},
{
- "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
{ },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ /* BPF_JMP32 | BPF_JEQ | BPF_K */
+ {
+ "JMP32_JEQ_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
INTERNAL,
{ },
- { { 0, 4134 } },
- .fill_helper = bpf_fill_stxdw,
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JEQ_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JEQ_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JEQ | BPF_X */
+ {
+ "JMP32_JEQ_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_K */
+ {
+ "JMP32_JNE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JNE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JNE_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_X */
+ {
+ "JMP32_JNE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_K */
+ {
+ "JMP32_JSET_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "JMP32_JSET_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x40000000 } }
+ },
+ {
+ "JMP32_JSET_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_X */
+ {
+ "JMP32_JSET_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 8),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 8 } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_K */
+ {
+ "JMP32_JGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_X */
+ {
+ "JMP32_JGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_K */
+ {
+ "JMP32_JGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_X */
+ {
+ "JMP32_JGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_K */
+ {
+ "JMP32_JLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_X */
+ {
+ "JMP32_JLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_K */
+ {
+ "JMP32_JLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_X */
+ {
+ "JMP32_JLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_K */
+ {
+ "JMP32_JSGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_X */
+ {
+ "JMP32_JSGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_K */
+ {
+ "JMP32_JSGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_X */
+ {
+ "JMP32_JSGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_K */
+ {
+ "JMP32_JSLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_X */
+ {
+ "JMP32_JSLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLE | BPF_K */
+ {
+ "JMP32_JSLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+	/* BPF_JMP32 | BPF_JSLE | BPF_X */
+ {
+ "JMP32_JSLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
},
/* BPF_JMP | BPF_EXIT */
{
@@ -4424,6 +9815,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP32 | BPF_JA */
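+	/*
+	 * BPF_JMP32 | BPF_JA takes its jump offset from the 32-bit immediate
+	 * field instead of the 16-bit offset field, hence imm = 1 and off = 0
+	 * in the instruction below.
+	 */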
+ {
+ "JMP32_JA: Unconditional jump: if (true) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JA, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
@@ -5275,31 +10680,21 @@ static struct bpf_test tests[] = {
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ },
{
{ 1, SKB_VLAN_PRESENT },
{ 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
- .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
CLASSIC | FLAG_NO_DATA,
-#endif
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
- .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
@@ -5328,15 +10723,10 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
CLASSIC | FLAG_NO_DATA,
-#endif
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
- .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: jump over MSH",
@@ -5350,28 +10740,18 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: exec all MSH",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xababab83 } },
.fill_helper = bpf_fill_maxinsns13,
- .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ },
{ { 1, 0xbee } },
.fill_helper = bpf_fill_ld_abs_get_processor_id,
- .expected_errcode = -ENOTSUPP,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
@@ -6461,6 +11841,2839 @@ static struct bpf_test tests[] = {
{},
{ { 0, 2 } },
},
+ /* BPF_LDX_MEM with operand aliasing */
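+	/*
+	 * These loads use the same register as both address and destination,
+	 * checking that the address is consumed before the destination is
+	 * overwritten with the loaded value.
+	 */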
+ {
+ "LDX_MEM_B: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_B, R10, -8, 123),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_B, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_H: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_H, R10, -8, 12345),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_H, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_W: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -8, 123456789),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_W, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123456789 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_DW: operand register aliasing",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x123456789abcdefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_DW, R0, R0, -8),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_MOV64_REG(R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /*
+ * Register (non-)clobbering tests for the case where a JIT implements
+ * complex ALU or ATOMIC operations via function calls. If so, the
+ * function call must be transparent to the eBPF registers. The JIT
+ * must therefore save and restore relevant registers across the call.
+ * The following tests check that the eBPF registers retain their
+ * values after such an operation. Mainly intended for complex ALU
+ * and atomic operations, but we run it for all. You never know...
+ *
+ * Note that each operation should be tested twice with different
+ * destinations, to check preservation for all registers.
+ */
+#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src) \
+ { \
+ #alu "_" #op " to " #dst ": no clobbering", \
+ .u.insns_int = { \
+ BPF_ALU64_IMM(BPF_MOV, R0, R0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, R1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, R2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, R3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, R4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, R5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, R6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, R7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, R8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, R9), \
+ BPF_##alu(BPF_ ##op, dst, src), \
+ BPF_ALU32_IMM(BPF_MOV, dst, dst), \
+ BPF_JMP_IMM(BPF_JNE, R0, R0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, R1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, R2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, R3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, R5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, R6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, R7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, R8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, R9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } } \
+ }
+ /* ALU64 operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789),
+ /* ALU32 immediate operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789),
+ /* ALU64 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1),
+ /* ALU32 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1),
+#undef BPF_TEST_CLOBBER_ALU
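+	/*
+	 * The initial memory value below is chosen so that the atomic
+	 * operation leaves R0 (for BPF_CMPXCHG) and R1 (for fetching
+	 * operations) with their original values, letting the same register
+	 * checks be used for all variants.
+	 */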
+#define BPF_TEST_CLOBBER_ATOMIC(width, op) \
+ { \
+ "Atomic_" #width " " #op ": no clobbering", \
+ .u.insns_int = { \
+ BPF_ALU64_IMM(BPF_MOV, R0, 0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, 2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, 3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, 4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, 5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, 6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, 7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, 8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, 9), \
+ BPF_ST_MEM(width, R10, -8, \
+ (op) == BPF_CMPXCHG ? 0 : \
+ (op) & BPF_FETCH ? 1 : 0), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -8), \
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, 1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, 2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, 3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, 5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, 6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, 7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, 8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, 9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } }, \
+ .stack_depth = 8, \
+ }
+ /* 64-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG),
+ /* 32-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG),
+#undef BPF_TEST_CLOBBER_ATOMIC
+ /* Checking that ALU32 src is not zero extended in place */
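+	/*
+	 * The difference between the source register before and after the
+	 * operation is folded into the low 32 bits of R0, so a return value
+	 * of 0 means that all 64 bits of the source register were preserved.
+	 */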
+#define BPF_ALU32_SRC_ZEXT(op) \
+ { \
+ "ALU32_" #op "_X: src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R0, R1), \
+ BPF_ALU32_REG(BPF_##op, R2, R1), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_ALU32_SRC_ZEXT(MOV),
+ BPF_ALU32_SRC_ZEXT(AND),
+ BPF_ALU32_SRC_ZEXT(OR),
+ BPF_ALU32_SRC_ZEXT(XOR),
+ BPF_ALU32_SRC_ZEXT(ADD),
+ BPF_ALU32_SRC_ZEXT(SUB),
+ BPF_ALU32_SRC_ZEXT(MUL),
+ BPF_ALU32_SRC_ZEXT(DIV),
+ BPF_ALU32_SRC_ZEXT(MOD),
+#undef BPF_ALU32_SRC_ZEXT
+ /* Checking that ATOMIC32 src is not zero extended in place */
+#define BPF_ATOMIC32_SRC_ZEXT(op) \
+ { \
+ "ATOMIC_W_" #op ": src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ST_MEM(BPF_W, R10, -4, 0), \
+ BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 8, \
+ }
+ BPF_ATOMIC32_SRC_ZEXT(ADD),
+ BPF_ATOMIC32_SRC_ZEXT(AND),
+ BPF_ATOMIC32_SRC_ZEXT(OR),
+ BPF_ATOMIC32_SRC_ZEXT(XOR),
+#undef BPF_ATOMIC32_SRC_ZEXT
+ /* Checking that CMPXCHG32 src is not zero extended in place */
+ {
+ "ATOMIC_W_CMPXCHG: src preserved in zext",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_REG(BPF_MOV, R0, 0),
+ BPF_ST_MEM(BPF_W, R10, -4, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_OR, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* Checking that JMP32 immediate src is not zero extended in place */
+#define BPF_JMP32_IMM_ZEXT(op) \
+ { \
+ "JMP32_" #op "_K: operand preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_JMP32_IMM(BPF_##op, R0, 1234, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_IMM_ZEXT(JEQ),
+ BPF_JMP32_IMM_ZEXT(JNE),
+ BPF_JMP32_IMM_ZEXT(JSET),
+ BPF_JMP32_IMM_ZEXT(JGT),
+ BPF_JMP32_IMM_ZEXT(JGE),
+ BPF_JMP32_IMM_ZEXT(JLT),
+ BPF_JMP32_IMM_ZEXT(JLE),
+ BPF_JMP32_IMM_ZEXT(JSGT),
+ BPF_JMP32_IMM_ZEXT(JSGE),
+ BPF_JMP32_IMM_ZEXT(JSLT),
+ BPF_JMP32_IMM_ZEXT(JSLE),
+#undef BPF_JMP32_IMM_ZEXT
+ /* Checking that JMP32 dst & src are not zero extended in place */
+#define BPF_JMP32_REG_ZEXT(op) \
+ { \
+ "JMP32_" #op "_X: operands preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R2, R0), \
+ BPF_ALU64_REG(BPF_MOV, R3, R1), \
+ BPF_JMP32_IMM(BPF_##op, R0, R1, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R2), \
+ BPF_ALU64_REG(BPF_SUB, R1, R3), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_REG_ZEXT(JEQ),
+ BPF_JMP32_REG_ZEXT(JNE),
+ BPF_JMP32_REG_ZEXT(JSET),
+ BPF_JMP32_REG_ZEXT(JGT),
+ BPF_JMP32_REG_ZEXT(JGE),
+ BPF_JMP32_REG_ZEXT(JLT),
+ BPF_JMP32_REG_ZEXT(JLE),
+ BPF_JMP32_REG_ZEXT(JSGT),
+ BPF_JMP32_REG_ZEXT(JSGE),
+ BPF_JMP32_REG_ZEXT(JSLT),
+ BPF_JMP32_REG_ZEXT(JSLE),
+#undef BPF_JMP32_REG_ZEXT
+ /* ALU64 K register combinations */
+ {
+ "ALU64_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm_regs,
+ },
+ {
+ "ALU64_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm_regs,
+ },
+ {
+ "ALU64_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm_regs,
+ },
+ {
+ "ALU64_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm_regs,
+ },
+ {
+ "ALU64_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm_regs,
+ },
+ {
+ "ALU64_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm_regs,
+ },
+ {
+ "ALU64_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm_regs,
+ },
+ {
+ "ALU64_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm_regs,
+ },
+ {
+ "ALU64_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm_regs,
+ },
+ {
+ "ALU64_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm_regs,
+ },
+ {
+ "ALU64_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm_regs,
+ },
+ {
+ "ALU64_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm_regs,
+ },
+ /* ALU32 K registers */
+ {
+ "ALU32_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm_regs,
+ },
+ {
+ "ALU32_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm_regs,
+ },
+ {
+ "ALU32_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm_regs,
+ },
+ {
+ "ALU32_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm_regs,
+ },
+ {
+ "ALU32_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm_regs,
+ },
+ {
+ "ALU32_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm_regs,
+ },
+ {
+ "ALU32_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm_regs,
+ },
+ {
+ "ALU32_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm_regs,
+ },
+ {
+ "ALU32_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm_regs,
+ },
+ {
+ "ALU32_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm_regs,
+ },
+ {
+ "ALU32_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm_regs,
+ },
+ {
+ "ALU32_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm_regs,
+ },
+ /* ALU64 X register combinations */
+ {
+ "ALU64_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg_pairs,
+ },
+ {
+ "ALU64_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg_pairs,
+ },
+ {
+ "ALU64_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg_pairs,
+ },
+ {
+ "ALU64_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg_pairs,
+ },
+ {
+ "ALU64_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg_pairs,
+ },
+ {
+ "ALU64_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg_pairs,
+ },
+ {
+ "ALU64_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg_pairs,
+ },
+ {
+ "ALU64_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg_pairs,
+ },
+ {
+ "ALU64_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg_pairs,
+ },
+ {
+ "ALU64_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg_pairs,
+ },
+ {
+ "ALU64_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg_pairs,
+ },
+ {
+ "ALU64_MOD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg_pairs,
+ },
+ /* ALU32 X register combinations */
+ {
+ "ALU32_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg_pairs,
+ },
+ {
+ "ALU32_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg_pairs,
+ },
+ {
+ "ALU32_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg_pairs,
+ },
+ {
+ "ALU32_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg_pairs,
+ },
+ {
+ "ALU32_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg_pairs,
+ },
+ {
+ "ALU32_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg_pairs,
+ },
+ {
+ "ALU32_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg_pairs,
+ },
+ {
+ "ALU32_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg_pairs,
+ },
+ {
+ "ALU32_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg_pairs,
+ },
+ {
+ "ALU32_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg_pairs,
+ },
+ {
+ "ALU32_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg_pairs,
+ },
+ {
+ "ALU32_MOD_X register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg_pairs,
+ },
+ /* Exhaustive test of ALU64 shift operations */
+ {
+ "ALU64_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm,
+ },
+ {
+ "ALU64_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm,
+ },
+ {
+ "ALU64_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm,
+ },
+ {
+ "ALU64_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg,
+ },
+ /* Exhaustive test of ALU32 shift operations */
+ {
+ "ALU32_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm,
+ },
+ {
+ "ALU32_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm,
+ },
+ {
+ "ALU32_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm,
+ },
+ {
+ "ALU32_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg,
+ },
+ /*
+ * Exhaustive test of ALU64 shift operations when
+ * the source and destination registers are the same.
+ */
+ {
+ "ALU64_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_same_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_same_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_same_reg,
+ },
+ /*
+ * Exhaustive test of ALU32 shift operations when
+ * the source and destination registers are the same.
+ */
+ {
+ "ALU32_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_same_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_same_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_same_reg,
+ },
+ /* ALU64 immediate magnitudes */
+ {
+ "ALU64_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 immediate magnitudes */
+ {
+ "ALU32_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU64 register magnitudes */
+ {
+ "ALU64_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 register magnitudes */
+ {
+ "ALU32_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* LD_IMM64 immediate magnitudes and byte patterns */
+ {
+ "LD_IMM64: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_magn,
+ },
+ {
+ "LD_IMM64: checker byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_checker,
+ },
+ {
+ "LD_IMM64: random positive and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_zero,
+ },
+ {
+ "LD_IMM64: random negative and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_neg_zero,
+ },
+ {
+ "LD_IMM64: random positive and negative byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_neg,
+ },
+ /* 64-bit ATOMIC register combinations */
+ {
+ "ATOMIC_DW_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 32-bit ATOMIC register combinations */
+ {
+ "ATOMIC_W_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 64-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_DW_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg64,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+	/* 32-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_W_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg32,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP immediate magnitudes */
+ {
+ "JMP_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP register magnitudes */
+ {
+ "JMP_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 immediate magnitudes */
+ {
+ "JMP32_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 register magnitudes */
+ {
+ "JMP32_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* Conditional jumps with constant decision */
+ {
+ "JMP_JSET_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGE_K: imm = 0 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: imm = 0xffffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLE_K: imm = 0xffffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSGE_K: imm = -0x80000000 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSLT_K: imm = -0x80000000 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JEQ_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JNE_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ /* Short relative jumps */
+ {
+ "Short relative jump: offset=0",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=1",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=3",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=4",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ /* Conditional branch conversions */
+ {
+ "Long conditional jump: taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken,
+ },
+ {
+ "Long conditional jump: not taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken,
+ },
+ /* Staggered jump sequences, immediate */
+ {
+ "Staggered jumps: JMP_JA",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_ja,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, register */
+ {
+ "Staggered jumps: JMP_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 immediate */
+ {
+ "Staggered jumps: JMP32_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 register */
+ {
+ "Staggered jumps: JMP32_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
};
static struct net_device dev;
@@ -6486,7 +14699,6 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
- skb->vlan_present = SKB_VLAN_PRESENT;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
@@ -6505,6 +14717,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -6519,25 +14734,15 @@ static void *generate_test_data(struct bpf_test *test, int sub)
* single fragment to the skb, filled with
* test->frag_data.
*/
- void *ptr;
-
page = alloc_page(GFP_KERNEL);
-
if (!page)
goto err_kfree_skb;
- ptr = kmap(page);
- if (!ptr)
- goto err_free_page;
- memcpy(ptr, test->frag_data, MAX_DATA);
- kunmap(page);
+ memcpy(page_address(page), test->frag_data, MAX_DATA);
skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
}
return skb;
-
-err_free_page:
- __free_page(page);
err_kfree_skb:
kfree_skb(skb);
return NULL;
@@ -6548,7 +14753,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)
@@ -6624,6 +14832,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
fp->aux->stack_depth = tests[which].stack_depth;
+ fp->aux->verifier_zext = !!(tests[which].aux &
+ FLAG_VERIFIER_ZEXT);
/* We cannot error here as we don't need type compatibility
* checks.
@@ -6660,14 +14870,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- preempt_disable();
+ migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
- ret = BPF_PROG_RUN(fp, data);
+ ret = bpf_prog_run(fp, data);
finish = ktime_get_ns();
- preempt_enable();
+ migrate_enable();
*duration = finish - start;
do_div(*duration, runs);
@@ -6679,12 +14889,22 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
+ if (test->nr_testruns)
+ runs = min(test->nr_testruns, MAX_TESTRUNS);
+
for (i = 0; i < MAX_SUBTESTS; i++) {
void *data;
u64 duration;
u32 ret;
- if (test->test[i].data_size == 0 &&
+ /*
+ * NOTE: Several sub-tests may be present, in which case
+ * a zero {data_size, result} tuple indicates the end of
+ * the sub-test array. The first test is always run,
+ * even if both data_size and result happen to be zero.
+ */
+ if (i > 0 &&
+ test->test[i].data_size == 0 &&
test->test[i].result == 0)
break;
@@ -6700,8 +14920,10 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
if (ret == test->test[i].result) {
pr_cont("%lld ", duration);
} else {
- pr_cont("ret %d != %d ", ret,
- test->test[i].result);
+ s32 res = test->test[i].result;
+
+ pr_cont("ret %d != %d (%#x != %#x)",
+ ret, res, ret, res);
err_cnt++;
}
}
@@ -6715,86 +14937,9 @@ module_param_string(test_name, test_name, sizeof(test_name), 0);
static int test_id = -1;
module_param(test_id, int, 0);
-static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+static int test_range[2] = { 0, INT_MAX };
module_param_array(test_range, int, NULL, 0);
-static __init int find_test_index(const char *test_name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (!strcmp(tests[i].descr, test_name))
- return i;
- }
- return -1;
-}
-
-static __init int prepare_bpf_tests(void)
-{
- int i;
-
- if (test_id >= 0) {
- /*
- * if a test_id was specified, use test_range to
- * cover only that test.
- */
- if (test_id >= ARRAY_SIZE(tests)) {
- pr_err("test_bpf: invalid test_id specified.\n");
- return -EINVAL;
- }
-
- test_range[0] = test_id;
- test_range[1] = test_id;
- } else if (*test_name) {
- /*
- * if a test_name was specified, find it and setup
- * test_range to cover only that test.
- */
- int idx = find_test_index(test_name);
-
- if (idx < 0) {
- pr_err("test_bpf: no test named '%s' found.\n",
- test_name);
- return -EINVAL;
- }
- test_range[0] = idx;
- test_range[1] = idx;
- } else {
- /*
- * check that the supplied test_range is valid.
- */
- if (test_range[0] >= ARRAY_SIZE(tests) ||
- test_range[1] >= ARRAY_SIZE(tests) ||
- test_range[0] < 0 || test_range[1] < 0) {
- pr_err("test_bpf: test_range is out of bound.\n");
- return -EINVAL;
- }
-
- if (test_range[1] < test_range[0]) {
- pr_err("test_bpf: test_range is ending before it starts.\n");
- return -EINVAL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper &&
- tests[i].fill_helper(&tests[i]) < 0)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static __init void destroy_bpf_tests(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper)
- kfree(tests[i].u.ptr.insns);
- }
-}
-
static bool exclude_test(int test_id)
{
return test_id < test_range[0] || test_id > test_range[1];
@@ -6841,6 +14986,7 @@ static __init struct sk_buff *build_test_skb(void)
skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb[0])->gso_segs = 0;
skb_shinfo(skb[0])->frag_list = skb[1];
+ skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000;
/* adjust skb[0]'s len */
skb[0]->len += skb[1]->len;
@@ -6859,34 +15005,132 @@ err_page0:
return NULL;
}
-static __init int test_skb_segment(void)
+static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
{
+ unsigned int alloc_size = 2000;
+ unsigned int headroom = 102, doffset = 72, data_size = 1308;
+ struct sk_buff *skb[2];
+ int i;
+
+	/* Two skbs linked in a frag_list, both with linear data and with
+	 * head_frag=0 (data allocated by kmalloc). Each carries 1308 bytes
+	 * of tcp payload, so the total payload is 2616 bytes.
+	 * Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr), plus some headroom.
+ */
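+	/*
+	 * Layout sketch derived from the constants above: 102 bytes of
+	 * headroom + 72 bytes of headers + 1308 bytes of payload = 1482
+	 * bytes, which fits the 2000-byte linear allocation; the two skbs
+	 * together carry 2 * 1308 = 2616 bytes of payload.
+	 */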
+ for (i = 0; i < 2; i++) {
+ skb[i] = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb[i]->protocol = htons(ETH_P_IPV6);
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], doffset + data_size);
+ skb_reset_network_header(skb[i]);
+ if (i == 0)
+ skb_reset_mac_header(skb[i]);
+ else
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+ __skb_pull(skb[i], doffset);
+ }
+
+ /* setup shinfo.
+ * mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a
+ * reduced gso_size.
+ */
+ skb_shinfo(skb[0])->gso_size = 1288;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ kfree_skb(skb[0]);
+err_skb0:
+ return NULL;
+}
+
+struct skb_segment_test {
+ const char *descr;
+ struct sk_buff *(*build_skb)(void);
netdev_features_t features;
+};
+
+static struct skb_segment_test skb_segment_tests[] __initconst = {
+ {
+ .descr = "gso_with_rx_frags",
+ .build_skb = build_test_skb,
+ .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM
+ },
+ {
+ .descr = "gso_linear_no_head_frag",
+ .build_skb = build_test_skb_linear_no_head_frag,
+ .features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
+ NETIF_F_LLTX | NETIF_F_GRO |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_STAG_TX
+ }
+};
+
+static __init int test_skb_segment_single(const struct skb_segment_test *test)
+{
struct sk_buff *skb, *segs;
int ret = -1;
- features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
- features |= NETIF_F_RXCSUM;
- skb = build_test_skb();
+ skb = test->build_skb();
if (!skb) {
pr_info("%s: failed to build_test_skb", __func__);
goto done;
}
- segs = skb_segment(skb, features);
+ segs = skb_segment(skb, test->features);
if (!IS_ERR(segs)) {
kfree_skb_list(segs);
ret = 0;
- pr_info("%s: success in skb_segment!", __func__);
- } else {
- pr_info("%s: failed in skb_segment!", __func__);
}
kfree_skb(skb);
done:
return ret;
}
+static __init int test_skb_segment(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ const struct skb_segment_test *test = &skb_segment_tests[i];
+
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
+ pr_info("#%d %s ", i, test->descr);
+
+ if (test_skb_segment_single(test)) {
+ pr_cont("FAIL\n");
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__,
+ pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
@@ -6902,7 +15146,19 @@ static __init int test_bpf(void)
pr_info("#%d %s ", i, tests[i].descr);
+ if (tests[i].fill_helper &&
+ tests[i].fill_helper(&tests[i]) < 0) {
+ pr_cont("FAIL to prog_fill\n");
+ continue;
+ }
+
fp = generate_filter(i, &err);
+
+ if (tests[i].fill_helper) {
+ kfree(tests[i].u.ptr.insns);
+ tests[i].u.ptr.insns = NULL;
+ }
+
if (fp == NULL) {
if (err == 0) {
pass_cnt++;
@@ -6936,20 +15192,511 @@ static __init int test_bpf(void)
return err_cnt ? -EINVAL : 0;
}
+struct tail_call_test {
+ const char *descr;
+ struct bpf_insn insns[MAX_INSNS];
+ int flags;
+ int result;
+ int stack_depth;
+};
+
+/* Flags that can be passed to tail call test cases */
+#define FLAG_NEED_STATE BIT(0)
+#define FLAG_RESULT_IN_STATE BIT(1)
+
+/*
+ * Magic marker used in test snippets for tail calls below.
+ * BPF_LD/MOV to R2 and R3 with this immediate value are replaced
+ * with the proper values by the test runner.
+ */
+#define TAIL_CALL_MARKER 0x7a11ca11
+
+/* Special offset to indicate a NULL call target */
+#define TAIL_CALL_NULL 0x7fff
+
+/* Special offset to indicate an out-of-range index */
+#define TAIL_CALL_INVALID 0x7ffe
+
+#define TAIL_CALL(offset) \
+ BPF_LD_IMM64(R2, TAIL_CALL_MARKER), \
+ BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
+ offset, TAIL_CALL_MARKER), \
+ BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
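+
+/*
+ * Illustrative sketch (assuming the relocation loop in
+ * prepare_tail_call_tests() below): for test number 'which', the marker
+ * instructions emitted by TAIL_CALL(-1) are rewritten roughly as
+ *
+ *	BPF_LD_IMM64(R2, (long)progs),		// R2 = tail call map
+ *	BPF_ALU32_IMM(BPF_MOV, R3, which - 1),	// R3 = absolute index
+ *	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0),	// jump into progs->ptrs[R3]
+ *
+ * while TAIL_CALL_NULL and TAIL_CALL_INVALID resolve to the index of the
+ * trailing NULL entry and to an out-of-range index, respectively.
+ */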
+
+/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(bpf_test_func, u64, arg)
+{
+ char buf[64];
+ long a = 0;
+ long b = 1;
+ long c = 2;
+ long d = 3;
+ long e = 4;
+ long f = 5;
+ long g = 6;
+ long h = 7;
+
+ return snprintf(buf, sizeof(buf),
+ "%ld %lu %lx %ld %lu %lx %ld %lu %x",
+ a, b, c, d, e, f, g, h, (int)arg);
+}
+#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
+
+/*
+ * Tail call tests. Each test case may call any other test in the table,
+ * including itself, specified as a relative index offset from the calling
+ * test. The index TAIL_CALL_NULL can be used to specify a NULL target
+ * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
+ * results in a target index that is out of range.
+ */
+static struct tail_call_test tail_call_tests[] = {
+ {
+ "Tail call leaf",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 1,
+ },
+ {
+ "Tail call 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 3,
+ },
+ {
+ "Tail call 3",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 3),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 6,
+ },
+ {
+ "Tail call 4",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 4),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 10,
+ },
+ {
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
+ {
+ "Tail call error path, max count reached",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ },
+ {
+ "Tail call count preserved across function calls",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
+ BPF_CALL_REL(BPF_FUNC_jiffies64),
+ BPF_CALL_REL(BPF_FUNC_test_func),
+ BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .stack_depth = 8,
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ },
+ {
+ "Tail call error path, NULL target",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(TAIL_CALL_NULL),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
+ },
+ {
+ "Tail call error path, index out of range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(TAIL_CALL_INVALID),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
+ },
+};
+
+static void __init destroy_tail_call_tests(struct bpf_array *progs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
+ if (progs->ptrs[i])
+ bpf_prog_free(progs->ptrs[i]);
+ kfree(progs);
+}
+
+static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
+{
+ int ntests = ARRAY_SIZE(tail_call_tests);
+ struct bpf_array *progs;
+ int which, err;
+
+ /* Allocate the table of programs to be used for tail calls */
+ progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
+ if (!progs)
+ goto out_nomem;
+
+ /* Create all eBPF programs and populate the table */
+ for (which = 0; which < ntests; which++) {
+ struct tail_call_test *test = &tail_call_tests[which];
+ struct bpf_prog *fp;
+ int len, i;
+
+ /* Compute the number of program instructions */
+ for (len = 0; len < MAX_INSNS; len++) {
+ struct bpf_insn *insn = &test->insns[len];
+
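+			/* A BPF_LD_IMM64 takes two instruction slots; skip the second half */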
+ if (len < MAX_INSNS - 1 &&
+ insn->code == (BPF_LD | BPF_DW | BPF_IMM))
+ len++;
+ if (insn->code == 0)
+ break;
+ }
+
+ /* Allocate and initialize the program */
+ fp = bpf_prog_alloc(bpf_prog_size(len), 0);
+ if (!fp)
+ goto out_nomem;
+
+ fp->len = len;
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fp->aux->stack_depth = test->stack_depth;
+ memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
+
+ /* Relocate runtime tail call offsets and addresses */
+ for (i = 0; i < len; i++) {
+ struct bpf_insn *insn = &fp->insnsi[i];
+ long addr = 0;
+
+ switch (insn->code) {
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
+ insn[0].imm = (u32)(long)progs;
+ insn[1].imm = ((u64)(long)progs) >> 32;
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_K:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
+ if (insn->off == TAIL_CALL_NULL)
+ insn->imm = ntests;
+ else if (insn->off == TAIL_CALL_INVALID)
+ insn->imm = ntests + 1;
+ else
+ insn->imm = which + insn->off;
+ insn->off = 0;
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ break;
+ switch (insn->imm) {
+ case BPF_FUNC_get_numa_node_id:
+ addr = (long)&numa_node_id;
+ break;
+ case BPF_FUNC_ktime_get_ns:
+ addr = (long)&ktime_get_ns;
+ break;
+ case BPF_FUNC_ktime_get_boot_ns:
+ addr = (long)&ktime_get_boot_fast_ns;
+ break;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ addr = (long)&ktime_get_coarse_ns;
+ break;
+ case BPF_FUNC_jiffies64:
+ addr = (long)&get_jiffies_64;
+ break;
+ case BPF_FUNC_test_func:
+ addr = (long)&bpf_test_func;
+ break;
+ default:
+ err = -EFAULT;
+ goto out_err;
+ }
+ *insn = BPF_EMIT_CALL(addr);
+ if ((long)__bpf_call_base + insn->imm != addr)
+ *insn = BPF_JMP_A(0); /* Skip: NOP */
+ break;
+ }
+ }
+
+ fp = bpf_prog_select_runtime(fp, &err);
+ if (err)
+ goto out_err;
+
+ progs->ptrs[which] = fp;
+ }
+
+ /* The last entry contains a NULL program pointer */
+ progs->map.max_entries = ntests + 1;
+ *pprogs = progs;
+ return 0;
+
+out_nomem:
+ err = -ENOMEM;
+
+out_err:
+ if (progs)
+ destroy_tail_call_tests(progs);
+ return err;
+}
+
+static __init int test_tail_calls(struct bpf_array *progs)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+ int jit_cnt = 0, run_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ struct tail_call_test *test = &tail_call_tests[i];
+ struct bpf_prog *fp = progs->ptrs[i];
+ int *data = NULL;
+ int state = 0;
+ u64 duration;
+ int ret;
+
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
+ pr_info("#%d %s ", i, test->descr);
+ if (!fp) {
+ err_cnt++;
+ continue;
+ }
+ pr_cont("jited:%u ", fp->jited);
+
+ run_cnt++;
+ if (fp->jited)
+ jit_cnt++;
+
+ if (test->flags & FLAG_NEED_STATE)
+ data = &state;
+ ret = __run_one(fp, data, MAX_TESTRUNS, &duration);
+ if (test->flags & FLAG_RESULT_IN_STATE)
+ ret = state;
+ if (ret == test->result) {
+ pr_cont("%lld PASS", duration);
+ pass_cnt++;
+ } else {
+ pr_cont("ret %d != %d FAIL", ret, test->result);
+ err_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+ __func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+ return err_cnt ? -EINVAL : 0;
+}
+
+static char test_suite[32];
+module_param_string(test_suite, test_suite, sizeof(test_suite), 0);
+
+static __init int find_test_index(const char *test_name)
+{
+ int i;
+
+ if (!strcmp(test_suite, "test_bpf")) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!strcmp(tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_tail_calls")) {
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ if (!strcmp(tail_call_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_skb_segment")) {
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ if (!strcmp(skb_segment_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static __init int prepare_test_range(void)
+{
+ int valid_range;
+
+ if (!strcmp(test_suite, "test_bpf"))
+ valid_range = ARRAY_SIZE(tests);
+ else if (!strcmp(test_suite, "test_tail_calls"))
+ valid_range = ARRAY_SIZE(tail_call_tests);
+ else if (!strcmp(test_suite, "test_skb_segment"))
+ valid_range = ARRAY_SIZE(skb_segment_tests);
+ else
+ return 0;
+
+ if (test_id >= 0) {
+ /*
+ * if a test_id was specified, use test_range to
+ * cover only that test.
+ */
+ if (test_id >= valid_range) {
+ pr_err("test_bpf: invalid test_id specified for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ test_range[0] = test_id;
+ test_range[1] = test_id;
+ } else if (*test_name) {
+ /*
+ * if a test_name was specified, find it and set up
+ * test_range to cover only that test.
+ */
+ int idx = find_test_index(test_name);
+
+ if (idx < 0) {
+ pr_err("test_bpf: no test named '%s' found for '%s' suite.\n",
+ test_name, test_suite);
+ return -EINVAL;
+ }
+ test_range[0] = idx;
+ test_range[1] = idx;
+ } else if (test_range[0] != 0 || test_range[1] != INT_MAX) {
+ /*
+ * check that the supplied test_range is valid.
+ */
+ if (test_range[0] < 0 || test_range[1] >= valid_range) {
+ pr_err("test_bpf: test_range is out of bound for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ if (test_range[1] < test_range[0]) {
+ pr_err("test_bpf: test_range is ending before it starts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int __init test_bpf_init(void)
{
+ struct bpf_array *progs = NULL;
int ret;
- ret = prepare_bpf_tests();
+ if (strlen(test_suite) &&
+ strcmp(test_suite, "test_bpf") &&
+ strcmp(test_suite, "test_tail_calls") &&
+ strcmp(test_suite, "test_skb_segment")) {
+ pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite);
+ return -EINVAL;
+ }
+
+ /*
+ * if test_suite is not specified, but test_id, test_name or test_range
+ * is specified, set 'test_bpf' as the default test suite.
+ */
+ if (!strlen(test_suite) &&
+ (test_id != -1 || strlen(test_name) ||
+ (test_range[0] != 0 || test_range[1] != INT_MAX))) {
+ pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n");
+ strscpy(test_suite, "test_bpf", sizeof(test_suite));
+ }
+
+ ret = prepare_test_range();
if (ret < 0)
return ret;
- ret = test_bpf();
- destroy_bpf_tests();
- if (ret)
- return ret;
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) {
+ ret = test_bpf();
+ if (ret)
+ return ret;
+ }
- return test_skb_segment();
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) {
+ ret = prepare_tail_call_tests(&progs);
+ if (ret)
+ return ret;
+ ret = test_tail_calls(progs);
+ destroy_tail_call_tests(progs);
+ if (ret)
+ return ret;
+ }
+
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment"))
+ return test_skb_segment();
+
+ return 0;
}
static void __exit test_bpf_exit(void)
diff --git a/lib/test_dynamic_debug.c b/lib/test_dynamic_debug.c
new file mode 100644
index 000000000000..8dd250ad022b
--- /dev/null
+++ b/lib/test_dynamic_debug.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing dynamic_debug
+ *
+ * Authors:
+ * Jim Cromie <jim.cromie@gmail.com>
+ */
+
+#define pr_fmt(fmt) "test_dd: " fmt
+
+#include <linux/module.h>
+
+/* run tests by reading or writing sysfs node: do_prints */
+
+static void do_prints(void); /* device under test */
+static int param_set_do_prints(const char *instr, const struct kernel_param *kp)
+{
+ do_prints();
+ return 0;
+}
+static int param_get_do_prints(char *buffer, const struct kernel_param *kp)
+{
+ do_prints();
+ return scnprintf(buffer, PAGE_SIZE, "did do_prints\n");
+}
+static const struct kernel_param_ops param_ops_do_prints = {
+ .set = param_set_do_prints,
+ .get = param_get_do_prints,
+};
+module_param_cb(do_prints, &param_ops_do_prints, NULL, 0600);
+
+/*
+ * Using the CLASSMAP api:
+ * - classmaps must have a corresponding enum
+ * - enum symbols must match/correlate with the class-name strings in the map
+ * - base must equal the enum's first value
+ * - multiple maps must set their bases to share the 0-30 class_id space !!
+ * (build-bug-on tips welcome)
+ * Additionally, here:
+ * - tie together sysname, mapname, bitsname, flagsname
+ */
+#define DD_SYS_WRAP(_model, _flags) \
+ static unsigned long bits_##_model; \
+ static struct ddebug_class_param _flags##_model = { \
+ .bits = &bits_##_model, \
+ .flags = #_flags, \
+ .map = &map_##_model, \
+ }; \
+ module_param_cb(_flags##_##_model, &param_ops_dyndbg_classes, &_flags##_model, 0600)
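+/* e.g. DD_SYS_WRAP(disjoint_bits, p) creates module param p_disjoint_bits, driving the 'p' flag of that classmap */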
+
+/* numeric input, independent bits */
+enum cat_disjoint_bits {
+ D2_CORE = 0,
+ D2_DRIVER,
+ D2_KMS,
+ D2_PRIME,
+ D2_ATOMIC,
+ D2_VBL,
+ D2_STATE,
+ D2_LEASE,
+ D2_DP,
+ D2_DRMRES };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_bits, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "D2_CORE",
+ "D2_DRIVER",
+ "D2_KMS",
+ "D2_PRIME",
+ "D2_ATOMIC",
+ "D2_VBL",
+ "D2_STATE",
+ "D2_LEASE",
+ "D2_DP",
+ "D2_DRMRES");
+DD_SYS_WRAP(disjoint_bits, p);
+DD_SYS_WRAP(disjoint_bits, T);
+
+/* symbolic input, independent bits */
+enum cat_disjoint_names { LOW = 11, MID, HI };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_names, DD_CLASS_TYPE_DISJOINT_NAMES, 10,
+ "LOW", "MID", "HI");
+DD_SYS_WRAP(disjoint_names, p);
+DD_SYS_WRAP(disjoint_names, T);
+
+/* numeric verbosity, V2 > V1 related */
+enum cat_level_num { V0 = 14, V1, V2, V3, V4, V5, V6, V7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_num, DD_CLASS_TYPE_LEVEL_NUM, 14,
+ "V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7");
+DD_SYS_WRAP(level_num, p);
+DD_SYS_WRAP(level_num, T);
+
+/* symbolic verbosity */
+enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_names, DD_CLASS_TYPE_LEVEL_NAMES, 22,
+ "L0", "L1", "L2", "L3", "L4", "L5", "L6", "L7");
+DD_SYS_WRAP(level_names, p);
+DD_SYS_WRAP(level_names, T);
+
+/* stand-in for all pr_debug etc */
+#define prdbg(SYM) __pr_debug_cls(SYM, #SYM " msg\n")
+
+static void do_cats(void)
+{
+ pr_debug("doing categories\n");
+
+ prdbg(LOW);
+ prdbg(MID);
+ prdbg(HI);
+
+ prdbg(D2_CORE);
+ prdbg(D2_DRIVER);
+ prdbg(D2_KMS);
+ prdbg(D2_PRIME);
+ prdbg(D2_ATOMIC);
+ prdbg(D2_VBL);
+ prdbg(D2_STATE);
+ prdbg(D2_LEASE);
+ prdbg(D2_DP);
+ prdbg(D2_DRMRES);
+}
+
+static void do_levels(void)
+{
+ pr_debug("doing levels\n");
+
+ prdbg(V1);
+ prdbg(V2);
+ prdbg(V3);
+ prdbg(V4);
+ prdbg(V5);
+ prdbg(V6);
+ prdbg(V7);
+
+ prdbg(L1);
+ prdbg(L2);
+ prdbg(L3);
+ prdbg(L4);
+ prdbg(L5);
+ prdbg(L6);
+ prdbg(L7);
+}
+
+static void do_prints(void)
+{
+ do_cats();
+ do_levels();
+}
+
+static int __init test_dynamic_debug_init(void)
+{
+ pr_debug("init start\n");
+ do_prints();
+ pr_debug("init done\n");
+ return 0;
+}
+
+static void __exit test_dynamic_debug_exit(void)
+{
+ pr_debug("exited\n");
+}
+
+module_init(test_dynamic_debug_init);
+module_exit(test_dynamic_debug_exit);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 6ca97a63b3d6..9cfdcd6d21db 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -18,17 +18,26 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
+#include <linux/efi_embedded_fw.h>
+
+MODULE_IMPORT_NS(TEST_FIRMWARE);
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
+#define TEST_FIRMWARE_BUF_SIZE SZ_1K
+#define TEST_UPLOAD_MAX_SIZE SZ_2K
+#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
+static LIST_HEAD(test_upload_list);
struct test_batched_req {
u8 idx;
@@ -36,15 +45,21 @@ struct test_batched_req {
bool sent;
const struct firmware *fw;
const char *name;
+ const char *fw_buf;
struct completion completion;
struct task_struct *task;
struct device *dev;
};
/**
- * test_config - represents configuration for the test for different triggers
+ * struct test_config - represents configuration for the test for different triggers
*
* @name: the name of the firmware file to look for
+ * @into_buf: when this is true,
+ * request_firmware_into_buf() will be used instead.
+ * @buf_size: size of buf to allocate when into_buf is true
+ * @file_offset: file offset to request when calling request_firmware_into_buf
+ * @partial: partial read option when calling request_firmware_into_buf
* @sync_direct: when the sync trigger is used if this is true
* request_firmware_direct() will be used instead.
* @send_uevent: whether or not to send a uevent for async requests
@@ -53,6 +68,7 @@ struct test_batched_req {
* @reqs: stores all requests information
* @read_fw_idx: index of thread from which we want to read firmware results
* from through the read_fw trigger.
+ * @upload_name: firmware name to be used with upload_read sysfs node
* @test_result: a test may use this to collect the result from the call
* of the request_firmware*() calls used in their tests. In order of
* priority we always keep first any setup error. If no setup errors were
@@ -83,10 +99,15 @@ struct test_batched_req {
*/
struct test_config {
char *name;
+ bool into_buf;
+ size_t buf_size;
+ size_t file_offset;
+ bool partial;
bool sync_direct;
bool send_uevent;
u8 num_requests;
u8 read_fw_idx;
+ char *upload_name;
/*
* These below don't belong here but we'll move them once we create
@@ -98,8 +119,34 @@ struct test_config {
struct device *device);
};
+struct upload_inject_err {
+ const char *prog;
+ enum fw_upload_err err_code;
+};
+
+struct test_firmware_upload {
+ char *name;
+ struct list_head node;
+ char *buf;
+ size_t size;
+ bool cancel_request;
+ struct upload_inject_err inject;
+ struct fw_upload *fwl;
+};
+
static struct test_config *test_fw_config;
+static struct test_firmware_upload *upload_lookup_name(const char *name)
+{
+ struct test_firmware_upload *tst;
+
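+ /* prefix match, so sysfs input with a trailing newline still resolves */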
+ list_for_each_entry(tst, &test_upload_list, node)
+ if (strncmp(name, tst->name, strlen(tst->name)) == 0)
+ return tst;
+
+ return NULL;
+}
+
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
{
@@ -129,8 +176,14 @@ static void __test_release_all_firmware(void)
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
- if (req->fw)
+ if (req->fw) {
+ if (req->fw_buf) {
+ kfree_const(req->fw_buf);
+ req->fw_buf = NULL;
+ }
release_firmware(req->fw);
+ req->fw = NULL;
+ }
}
vfree(test_fw_config->reqs);
@@ -161,7 +214,7 @@ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
*dst = kstrndup(name, count, gfp);
if (!*dst)
- return -ENOSPC;
+ return -ENOMEM;
return count;
}
@@ -176,10 +229,15 @@ static int __test_firmware_config_init(void)
test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
test_fw_config->send_uevent = true;
+ test_fw_config->into_buf = false;
+ test_fw_config->buf_size = TEST_FIRMWARE_BUF_SIZE;
+ test_fw_config->file_offset = 0;
+ test_fw_config->partial = false;
test_fw_config->sync_direct = false;
test_fw_config->req_firmware = request_firmware;
test_fw_config->test_result = 0;
test_fw_config->reqs = NULL;
+ test_fw_config->upload_name = NULL;
return 0;
@@ -229,26 +287,43 @@ static ssize_t config_show(struct device *dev,
dev_name(dev));
if (test_fw_config->name)
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"name:\t%s\n",
test_fw_config->name);
else
- len += scnprintf(buf+len, PAGE_SIZE - len,
- "name:\tEMTPY\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "name:\tEMPTY\n");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"num_requests:\t%u\n", test_fw_config->num_requests);
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
- "FW_ACTION_HOTPLUG" :
- "FW_ACTION_NOHOTPLUG");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ "FW_ACTION_UEVENT" :
+ "FW_ACTION_NOUEVENT");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "into_buf:\t\t%s\n",
+ test_fw_config->into_buf ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "buf_size:\t%zu\n", test_fw_config->buf_size);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "file_offset:\t%zu\n", test_fw_config->file_offset);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "partial:\t\t%s\n",
+ test_fw_config->partial ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"sync_direct:\t\t%s\n",
test_fw_config->sync_direct ? "true" : "false");
- len += scnprintf(buf+len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+ if (test_fw_config->upload_name)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\t%s\n",
+ test_fw_config->upload_name);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\tEMPTY\n");
mutex_unlock(&test_fw_mutex);
@@ -285,46 +360,40 @@ static ssize_t config_test_show_str(char *dst,
return len;
}
-static int test_dev_config_update_bool(const char *buf, size_t size,
+static inline int __test_dev_config_update_bool(const char *buf, size_t size,
bool *cfg)
{
int ret;
- mutex_lock(&test_fw_mutex);
- if (strtobool(buf, cfg) < 0)
+ if (kstrtobool(buf, cfg) < 0)
ret = -EINVAL;
else
ret = size;
- mutex_unlock(&test_fw_mutex);
return ret;
}
-static ssize_t
-test_dev_config_show_bool(char *buf,
- bool config)
+static int test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
{
- bool val;
+ int ret;
mutex_lock(&test_fw_mutex);
- val = config;
+ ret = __test_dev_config_update_bool(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
- return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ return ret;
}
-static ssize_t test_dev_config_show_int(char *buf, int cfg)
+static ssize_t test_dev_config_show_bool(char *buf, bool val)
{
- int val;
-
- mutex_lock(&test_fw_mutex);
- val = cfg;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+static int __test_dev_config_update_size_t(
+ const char *buf,
+ size_t size,
+ size_t *cfg)
{
int ret;
long new;
@@ -333,25 +402,50 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
if (ret)
return ret;
- if (new > U8_MAX)
- return -EINVAL;
-
- mutex_lock(&test_fw_mutex);
- *(u8 *)cfg = new;
- mutex_unlock(&test_fw_mutex);
+ *(size_t *)cfg = new;
/* Always return full write size even if we didn't consume all */
return size;
}
-static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
+static ssize_t test_dev_config_show_size_t(char *buf, size_t val)
+{
+ return snprintf(buf, PAGE_SIZE, "%zu\n", val);
+}
+
+static ssize_t test_dev_config_show_int(char *buf, int val)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
u8 val;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ *(u8 *)cfg = val;
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
mutex_lock(&test_fw_mutex);
- val = cfg;
+ ret = __test_dev_config_update_u8(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+
+static ssize_t test_dev_config_show_u8(char *buf, u8 val)
+{
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
@@ -363,6 +457,32 @@ static ssize_t config_name_show(struct device *dev,
}
static DEVICE_ATTR_RW(config_name);
+static ssize_t config_upload_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (tst)
+ test_fw_config->upload_name = tst->name;
+ else
+ ret = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t config_upload_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return config_test_show_str(buf, test_fw_config->upload_name);
+}
+static DEVICE_ATTR_RW(config_upload_name);
+
static ssize_t config_num_requests_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -376,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev,
mutex_unlock(&test_fw_mutex);
goto out;
}
- mutex_unlock(&test_fw_mutex);
- rc = test_dev_config_update_u8(buf, count,
- &test_fw_config->num_requests);
+ rc = __test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+ mutex_unlock(&test_fw_mutex);
out:
return rc;
@@ -393,6 +513,100 @@ static ssize_t config_num_requests_show(struct device *dev,
}
static DEVICE_ATTR_RW(config_num_requests);
+static ssize_t config_into_buf_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->into_buf);
+}
+
+static ssize_t config_into_buf_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->into_buf);
+}
+static DEVICE_ATTR_RW(config_into_buf);
+
+static ssize_t config_buf_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+
+ rc = __test_dev_config_update_size_t(buf, count,
+ &test_fw_config->buf_size);
+ mutex_unlock(&test_fw_mutex);
+
+out:
+ return rc;
+}
+
+static ssize_t config_buf_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->buf_size);
+}
+static DEVICE_ATTR_RW(config_buf_size);
+
+static ssize_t config_file_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+
+ rc = __test_dev_config_update_size_t(buf, count,
+ &test_fw_config->file_offset);
+ mutex_unlock(&test_fw_mutex);
+
+out:
+ return rc;
+}
+
+static ssize_t config_file_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->file_offset);
+}
+static DEVICE_ATTR_RW(config_file_offset);
+
+static ssize_t config_partial_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->partial);
+}
+
+static ssize_t config_partial_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->partial);
+}
+static DEVICE_ATTR_RW(config_partial);
+
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -457,12 +671,14 @@ static ssize_t trigger_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
if (rc) {
@@ -481,6 +697,64 @@ out:
}
static DEVICE_ATTR_WO(trigger_request);
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+extern struct list_head efi_embedded_fw_list;
+extern bool efi_embedded_fw_checked;
+
+static ssize_t trigger_request_platform_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const u8 test_data[] = {
+ 0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
+ 0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
+ 0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
+ 0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
+ };
+ struct efi_embedded_fw efi_embedded_fw;
+ const struct firmware *firmware = NULL;
+ bool saved_efi_embedded_fw_checked;
+ char *name;
+ int rc;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ pr_info("inserting test platform fw '%s'\n", name);
+ efi_embedded_fw.name = name;
+ efi_embedded_fw.data = (void *)test_data;
+ efi_embedded_fw.length = sizeof(test_data);
+ list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+ saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
+ efi_embedded_fw_checked = true;
+
+ pr_info("loading '%s'\n", name);
+ rc = firmware_request_platform(&firmware, name, dev);
+ if (rc) {
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ goto out;
+ }
+ if (firmware->size != sizeof(test_data) ||
+ memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
+ pr_info("firmware contents mismatch for '%s'\n", name);
+ rc = -EINVAL;
+ goto out;
+ }
+ pr_info("loaded: %zu\n", firmware->size);
+ rc = count;
+
+out:
+ efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
+ release_firmware(firmware);
+ list_del(&efi_embedded_fw.list);
+ kfree(name);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_request_platform);
+#endif
+
static DECLARE_COMPLETION(async_fw_done);
static void trigger_async_request_cb(const struct firmware *fw, void *context)
@@ -498,13 +772,15 @@ static ssize_t trigger_async_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
NULL, trigger_async_request_cb);
if (rc) {
@@ -522,7 +798,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
rc = count;
} else {
pr_err("failed to async load firmware\n");
- rc = -ENODEV;
+ rc = -ENOMEM;
}
out:
@@ -541,14 +817,16 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s' using custom fallback mechanism\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
test_firmware = NULL;
- rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+ rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
dev, GFP_KERNEL, NULL,
trigger_async_request_cb);
if (rc) {
@@ -585,7 +863,38 @@ static int test_fw_run_batch_request(void *data)
return -EINVAL;
}
- req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
+ if (test_fw_config->into_buf) {
+ void *test_buf;
+
+ test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
+ if (!test_buf)
+ return -ENOMEM;
+
+ if (test_fw_config->partial)
+ req->rc = request_partial_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size,
+ test_fw_config->file_offset);
+ else
+ req->rc = request_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size);
+ if (!req->fw)
+ kfree(test_buf);
+ else
+ req->fw_buf = test_buf;
+ } else {
+ req->rc = test_fw_config->req_firmware(&req->fw,
+ req->name,
+ req->dev);
+ }
+
if (req->rc) {
pr_info("#%u: batched sync load failed: %d\n",
req->idx, req->rc);
@@ -619,6 +928,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -635,6 +949,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
req->fw = NULL;
req->idx = i;
req->name = test_fw_config->name;
+ req->fw_buf = NULL;
req->dev = dev;
init_completion(&req->completion);
req->task = kthread_run(test_fw_run_batch_request, req,
@@ -717,6 +1032,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -728,12 +1048,13 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
pr_info("batched loading '%s' custom fallback mechanism %u times\n",
test_fw_config->name, test_fw_config->num_requests);
- send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
- FW_ACTION_NOHOTPLUG;
+ send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
+ FW_ACTION_NOUEVENT;
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
req->name = test_fw_config->name;
+ req->fw_buf = NULL;
req->fw = NULL;
req->idx = i;
init_completion(&req->completion);
@@ -779,6 +1100,279 @@ out:
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);
+static void upload_release(struct test_firmware_upload *tst)
+{
+ firmware_upload_unregister(tst->fwl);
+ kfree(tst->buf);
+ kfree(tst->name);
+ kfree(tst);
+}
+
+static void upload_release_all(void)
+{
+ struct test_firmware_upload *tst, *tmp;
+
+ list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
+ list_del(&tst->node);
+ upload_release(tst);
+ }
+ test_fw_config->upload_name = NULL;
+}
+
+/*
+ * This table is replicated from .../firmware_loader/sysfs_upload.c
+ * and needs to be kept in sync.
+ */
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+ [FW_UPLOAD_ERR_FW_INVALID] = "firmware-invalid",
+};
+
+static void upload_err_inject_error(struct test_firmware_upload *tst,
+ const u8 *p, const char *prog)
+{
+ enum fw_upload_err err;
+
+ for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
+ if (strncmp(p, fw_upload_err_str[err],
+ strlen(fw_upload_err_str[err])) == 0) {
+ tst->inject.prog = prog;
+ tst->inject.err_code = err;
+ return;
+ }
+ }
+}
+
+static void upload_err_inject_prog(struct test_firmware_upload *tst,
+ const u8 *p)
+{
+ static const char * const progs[] = {
+ "preparing:", "transferring:", "programming:"
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(progs); i++) {
+ if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
+ upload_err_inject_error(tst, p + strlen(progs[i]),
+ progs[i]);
+ return;
+ }
+ }
+}
+
+#define FIVE_MINUTES_MS (5 * 60 * 1000)
+static enum fw_upload_err
+fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
+{
+ int ms_delay;
+
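+ /* poll for a cancellation request for up to five minutes */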
+ for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
+ msleep(100);
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+ }
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
+ const char *progress = "preparing:";
+
+ tst->cancel_request = false;
+
+ if (!size || size > TEST_UPLOAD_MAX_SIZE) {
+ ret = FW_UPLOAD_ERR_INVALID_SIZE;
+ goto err_out;
+ }
+
+ if (strncmp(data, "inject:", strlen("inject:")) == 0)
+ upload_err_inject_prog(tst, data + strlen("inject:"));
+
+ memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
+ tst->size = size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ ret = fw_upload_wait_on_cancel(tst);
+ else
+ ret = tst->inject.err_code;
+
+err_out:
+ /*
+ * The cleanup op only executes if the prepare op succeeds.
+ * If the prepare op fails, it must do its own clean-up.
+ */
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+
+ return ret;
+}
+
+static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "transferring:";
+ u32 blk_size;
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
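+ /* copy at most one odd-sized block per call; the upload core keeps calling until the image is written */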
+ blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
+ memcpy(tst->buf + offset, data + offset, blk_size);
+
+ *written = blk_size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "programming:";
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static void test_fw_upload_cancel(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->cancel_request = true;
+}
+
+static void test_fw_cleanup(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+}
+
+static const struct fw_upload_ops upload_test_ops = {
+ .prepare = test_fw_upload_prepare,
+ .write = test_fw_upload_write,
+ .poll_complete = test_fw_upload_complete,
+ .cancel = test_fw_upload_cancel,
+ .cleanup = test_fw_cleanup
+};
+
+static ssize_t upload_register_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ struct fw_upload *fwl;
+ char *name;
+ int ret;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(name);
+ if (tst) {
+ ret = -EEXIST;
+ goto free_name;
+ }
+
+ tst = kzalloc(sizeof(*tst), GFP_KERNEL);
+ if (!tst) {
+ ret = -ENOMEM;
+ goto free_name;
+ }
+
+ tst->name = name;
+ tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
+ if (!tst->buf) {
+ ret = -ENOMEM;
+ goto free_tst;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
+ &upload_test_ops, tst);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ goto free_buf;
+ }
+
+ tst->fwl = fwl;
+ list_add_tail(&tst->node, &test_upload_list);
+ mutex_unlock(&test_fw_mutex);
+ return count;
+
+free_buf:
+ kfree(tst->buf);
+
+free_tst:
+ kfree(tst);
+
+free_name:
+ mutex_unlock(&test_fw_mutex);
+ kfree(name);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_register);
+
+static ssize_t upload_unregister_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (!tst) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_fw_config->upload_name == tst->name)
+ test_fw_config->upload_name = NULL;
+
+ list_del(&tst->node);
+ upload_release(tst);
+
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_unregister);
+
static ssize_t test_result_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -841,6 +1435,45 @@ out:
}
static DEVICE_ATTR_RO(read_firmware);
+static ssize_t upload_read_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct test_firmware_upload *tst = NULL;
+ struct test_firmware_upload *tst_iter;
+ int ret = -EINVAL;
+
+ if (!test_fw_config->upload_name) {
+ pr_err("Set config_upload_name before using upload_read\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&test_fw_mutex);
+ list_for_each_entry(tst_iter, &test_upload_list, node)
+ if (tst_iter->name == test_fw_config->upload_name) {
+ tst = tst_iter;
+ break;
+ }
+
+ if (!tst) {
+ pr_err("Firmware name not found: %s\n",
+ test_fw_config->upload_name);
+ goto out;
+ }
+
+ if (tst->size > PAGE_SIZE) {
+ pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ goto out;
+ }
+
+ memcpy(buf, tst->buf, tst->size);
+ ret = tst->size;
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_RO(upload_read);
+
#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
static struct attribute *test_dev_attrs[] = {
@@ -849,14 +1482,22 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(config),
TEST_FW_DEV_ATTR(config_name),
TEST_FW_DEV_ATTR(config_num_requests),
+ TEST_FW_DEV_ATTR(config_into_buf),
+ TEST_FW_DEV_ATTR(config_buf_size),
+ TEST_FW_DEV_ATTR(config_file_offset),
+ TEST_FW_DEV_ATTR(config_partial),
TEST_FW_DEV_ATTR(config_sync_direct),
TEST_FW_DEV_ATTR(config_send_uevent),
TEST_FW_DEV_ATTR(config_read_fw_idx),
+ TEST_FW_DEV_ATTR(config_upload_name),
/* These don't use the config at all - they could be ported! */
TEST_FW_DEV_ATTR(trigger_request),
TEST_FW_DEV_ATTR(trigger_async_request),
TEST_FW_DEV_ATTR(trigger_custom_fallback),
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+ TEST_FW_DEV_ATTR(trigger_request_platform),
+#endif
/* These use the config and can use the test_result */
TEST_FW_DEV_ATTR(trigger_batched_requests),
@@ -865,6 +1506,9 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(release_all_firmware),
TEST_FW_DEV_ATTR(test_result),
TEST_FW_DEV_ATTR(read_firmware),
+ TEST_FW_DEV_ATTR(upload_read),
+ TEST_FW_DEV_ATTR(upload_register),
+ TEST_FW_DEV_ATTR(upload_unregister),
NULL,
};
@@ -894,6 +1538,7 @@ static int __init test_firmware_init(void)
rc = misc_register(&test_fw_misc_device);
if (rc) {
+ __test_firmware_config_free();
kfree(test_fw_config);
pr_err("could not register misc device: %d\n", rc);
return rc;
@@ -911,6 +1556,7 @@ static void __exit test_firmware_exit(void)
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
misc_deregister(&test_fw_misc_device);
+ upload_release_all();
__test_firmware_config_free();
kfree(test_fw_config);
mutex_unlock(&test_fw_mutex);
diff --git a/lib/test_fortify/read_overflow-memchr.c b/lib/test_fortify/read_overflow-memchr.c
new file mode 100644
index 000000000000..2743084b32af
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memchr.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memchr(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memchr_inv.c b/lib/test_fortify/read_overflow-memchr_inv.c
new file mode 100644
index 000000000000..b26e1f1bc217
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memchr_inv.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memchr_inv(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memcmp.c b/lib/test_fortify/read_overflow-memcmp.c
new file mode 100644
index 000000000000..d5d301ff64ef
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memcmp.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcmp(small, large, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memscan.c b/lib/test_fortify/read_overflow-memscan.c
new file mode 100644
index 000000000000..c1a97f2df0f0
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memscan.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memscan(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memcmp.c b/lib/test_fortify/read_overflow2-memcmp.c
new file mode 100644
index 000000000000..c6091e640f76
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memcmp.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcmp(large, small, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memcpy.c b/lib/test_fortify/read_overflow2-memcpy.c
new file mode 100644
index 000000000000..07b62e56cf16
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(large, instance.buf, sizeof(large))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memmove.c b/lib/test_fortify/read_overflow2-memmove.c
new file mode 100644
index 000000000000..34edfab040a3
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(large, instance.buf, sizeof(large))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2_field-memcpy.c b/lib/test_fortify/read_overflow2_field-memcpy.c
new file mode 100644
index 000000000000..de9569266223
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2_field-memmove.c b/lib/test_fortify/read_overflow2_field-memmove.c
new file mode 100644
index 000000000000..6cc2724c8f62
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/test_fortify.h b/lib/test_fortify/test_fortify.h
new file mode 100644
index 000000000000..d22664fff197
--- /dev/null
+++ b/lib/test_fortify/test_fortify.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+void do_fortify_tests(void);
+
+#define __BUF_SMALL 16
+#define __BUF_LARGE 32
+struct fortify_object {
+ int a;
+ char buf[__BUF_SMALL];
+ int c;
+};
+
+#define LITERAL_SMALL "AAAAAAAAAAAAAAA"
+#define LITERAL_LARGE "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+const char small_src[__BUF_SMALL] = LITERAL_SMALL;
+const char large_src[__BUF_LARGE] = LITERAL_LARGE;
+
+char small[__BUF_SMALL];
+char large[__BUF_LARGE];
+struct fortify_object instance;
+size_t size;
+
+void do_fortify_tests(void)
+{
+ /* Normal initializations. */
+ memset(&instance, 0x32, sizeof(instance));
+ memset(small, 0xA5, sizeof(small));
+ memset(large, 0x5A, sizeof(large));
+
+ TEST;
+}
diff --git a/lib/test_fortify/write_overflow-memcpy.c b/lib/test_fortify/write_overflow-memcpy.c
new file mode 100644
index 000000000000..3b3984e428fb
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(instance.buf, large_src, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-memmove.c b/lib/test_fortify/write_overflow-memmove.c
new file mode 100644
index 000000000000..640437c3b3e0
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(instance.buf, large_src, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-memset.c b/lib/test_fortify/write_overflow-memset.c
new file mode 100644
index 000000000000..36e34908cfb3
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memset.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memset(instance.buf, 0x5A, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strcpy-lit.c b/lib/test_fortify/write_overflow-strcpy-lit.c
new file mode 100644
index 000000000000..51effb3e50f9
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strcpy-lit.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strcpy(small, LITERAL_LARGE)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strcpy.c b/lib/test_fortify/write_overflow-strcpy.c
new file mode 100644
index 000000000000..84f1c56a64c8
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strcpy(small, large_src)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strncpy-src.c b/lib/test_fortify/write_overflow-strncpy-src.c
new file mode 100644
index 000000000000..8dcfb8c788dd
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strncpy-src.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strncpy(small, large_src, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strncpy.c b/lib/test_fortify/write_overflow-strncpy.c
new file mode 100644
index 000000000000..b85f079c815d
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strncpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strncpy(instance.buf, large_src, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strscpy.c b/lib/test_fortify/write_overflow-strscpy.c
new file mode 100644
index 000000000000..38feddf377dc
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strscpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strscpy(instance.buf, large_src, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memcpy.c b/lib/test_fortify/write_overflow_field-memcpy.c
new file mode 100644
index 000000000000..28cc81058dd3
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memmove.c b/lib/test_fortify/write_overflow_field-memmove.c
new file mode 100644
index 000000000000..377fcf9bb2fd
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memset.c b/lib/test_fortify/write_overflow_field-memset.c
new file mode 100644
index 000000000000..2331da26909e
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memset.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memset(instance.buf, 0x42, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
new file mode 100644
index 000000000000..24de0e5ff859
--- /dev/null
+++ b/lib/test_fprobe.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_fprobe.c - simple sanity test for fprobe
+ */
+
+#include <linux/kernel.h>
+#include <linux/fprobe.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static struct kunit *current_test;
+
+static u32 rand1, entry_val, exit_val;
+
+/* Use indirect calls to avoid inlining the target functions */
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
+static u32 (*target_nest)(u32 value, u32 (*nest)(u32));
+static unsigned long target_ip;
+static unsigned long target2_ip;
+static unsigned long target_nest_ip;
+static int entry_return_value;
+
+static noinline u32 fprobe_selftest_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 fprobe_selftest_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static noinline u32 fprobe_selftest_nest_target(u32 value, u32 (*nest)(u32))
+{
+ return nest(value + 2);
+}
+
+static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ /* This can be called for both fprobe_selftest_target and fprobe_selftest_target2 */
+ if (ip != target_ip)
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ entry_val = (rand1 / div_factor);
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ *(u32 *)data = entry_val;
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+
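+ /* a non-zero return tells fprobe to skip the exit handler (exercised by test_fprobe_skip) */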
+ return entry_return_value;
+}
+
+static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ if (ip != target_ip) {
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ } else
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
+ KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
+ exit_val = entry_val + div_factor;
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ KUNIT_EXPECT_EQ(current_test, *(u32 *)data, entry_val);
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+}
+
+static notrace int nest_entry_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ return 0;
+}
+
+static notrace void nest_exit_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ip, target_nest_ip);
+}
+
+/* Test entry only (no rethook) */
+static void test_fprobe_entry(struct kunit *test)
+{
+ struct fprobe fp_entry = {
+ .entry_handler = fp_entry_handler,
+ };
+
+ current_test = test;
+
+ /* Before registering, unregister should fail. */
+ KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
+}
+
+static void test_fprobe(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_syms(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+/* Test private entry_data */
+static void test_fprobe_data(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ .entry_data_size = sizeof(u32),
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+/* Test nr_maxactive */
+static void test_fprobe_nest(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_nest_target"};
+ struct fprobe fp = {
+ .entry_handler = nest_entry_handler,
+ .exit_handler = nest_exit_handler,
+ .nr_maxactive = 1,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ target_nest(rand1, target);
+ KUNIT_EXPECT_EQ(test, 1, fp.nmissed);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_skip(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ entry_return_value = 1;
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+ KUNIT_EXPECT_EQ(test, 0, fp.nmissed);
+ entry_return_value = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static unsigned long get_ftrace_location(void *func)
+{
+ unsigned long size, addr = (unsigned long)func;
+
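+ /* look up the symbol size, then find the ftrace callsite within the function's range */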
+ if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
+ return 0;
+
+ return ftrace_location_range(addr, addr + size - 1);
+}
+
+static int fprobe_test_init(struct kunit *test)
+{
+ rand1 = get_random_u32_above(div_factor);
+ target = fprobe_selftest_target;
+ target2 = fprobe_selftest_target2;
+ target_nest = fprobe_selftest_nest_target;
+ target_ip = get_ftrace_location(target);
+ target2_ip = get_ftrace_location(target2);
+ target_nest_ip = get_ftrace_location(target_nest);
+
+ return 0;
+}
+
+static struct kunit_case fprobe_testcases[] = {
+ KUNIT_CASE(test_fprobe_entry),
+ KUNIT_CASE(test_fprobe),
+ KUNIT_CASE(test_fprobe_syms),
+ KUNIT_CASE(test_fprobe_data),
+ KUNIT_CASE(test_fprobe_nest),
+ KUNIT_CASE(test_fprobe_skip),
+ {}
+};
+
+static struct kunit_suite fprobe_test_suite = {
+ .name = "fprobe_test",
+ .init = fprobe_test_init,
+ .test_cases = fprobe_testcases,
+};
+
+kunit_test_suites(&fprobe_test_suite);
+
diff --git a/lib/test_fpu.c b/lib/test_fpu.c
new file mode 100644
index 000000000000..e82db19fed84
--- /dev/null
+++ b/lib/test_fpu.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for using floating point operations inside a kernel module.
+ *
+ * This tests kernel_fpu_begin() and kernel_fpu_end() functions, especially
+ * when userland has modified the floating point control registers. The kernel
+ * state might depend on the state set by the userland thread that was active
+ * before a syscall.
+ *
+ * To facilitate the test, this module registers the file
+ * /sys/kernel/debug/selftest_helpers/test_fpu, which when read causes a
+ * sequence of floating point operations. If the operations fail, either the
+ * read returns error status or the kernel crashes.
+ * If the operations succeed, the read returns "1\n".
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <asm/fpu/api.h>
+
+static int test_fpu(void)
+{
+ /*
+ * This sequence of operations tests that rounding mode is
+ * to nearest and that denormal numbers are supported.
+ * Volatile variables are used to keep the compiler from optimizing
+ * the calculations away.
+ */
+ volatile double a, b, c, d, e, f, g;
+
+ a = 4.0;
+ b = 1e-15;
+ c = 1e-310;
+
+ /* Sets precision flag */
+ d = a + b;
+
+ /* Result depends on rounding mode */
+ e = a + b / 2;
+
+ /* Denormal and very large values */
+ f = b / c;
+
+ /* Depends on denormal support */
+ g = a + c * f;
+
+ if (d > a && e > a && g > a)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int test_fpu_get(void *data, u64 *val)
+{
+ int status = -EINVAL;
+
+ kernel_fpu_begin();
+ status = test_fpu();
+ kernel_fpu_end();
+
+ *val = 1;
+ return status;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
+static struct dentry *selftest_dir;
+
+static int __init test_fpu_init(void)
+{
+ selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
+ if (!selftest_dir)
+ return -ENOMEM;
+
+ debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
+ &test_fpu_fops);
+
+ return 0;
+}
+
+static void __exit test_fpu_exit(void)
+{
+ debugfs_remove(selftest_dir);
+}
+
+module_init(test_fpu_init);
+module_exit(test_fpu_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
new file mode 100644
index 000000000000..9ebf6f5549f3
--- /dev/null
+++ b/lib/test_free_pages.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_free_pages.c: Check that free_pages() doesn't leak memory
+ * Copyright (c) 2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+static void test_free_pages(gfp_t gfp)
+{
+ unsigned int i;
+
+ for (i = 0; i < 1000 * 1000; i++) {
+ unsigned long addr = __get_free_pages(gfp, 3);
+ struct page *page = virt_to_page((void *)addr);
+
+ /* Simulate page cache getting a speculative reference */
+ get_page(page);
+ free_pages(addr, 3);
+ put_page(page);
+ }
+}
+
+static int m_in(void)
+{
+ pr_info("Testing with GFP_KERNEL\n");
+ test_free_pages(GFP_KERNEL);
+ pr_info("Testing with GFP_KERNEL | __GFP_COMP\n");
+ test_free_pages(GFP_KERNEL | __GFP_COMP);
+ pr_info("Test completed\n");
+
+ return 0;
+}
+
+static void m_ex(void)
+{
+}
+
+module_init(m_in);
+module_exit(m_ex);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hash.c b/lib/test_hash.c
index 0ee40b4a56dd..bb25fda34794 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -14,17 +14,15 @@
* and hash_64().
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
-
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
-#include <linux/printk.h>
+#include <kunit/test.h>
/* 32-bit XORSHIFT generator. Seed must not be zero. */
-static u32 __init __attribute_const__
+static u32 __attribute_const__
xorshift(u32 seed)
{
seed ^= seed << 13;
@@ -34,7 +32,7 @@ xorshift(u32 seed)
}
/* Given a non-zero x, returns a non-zero byte. */
-static u8 __init __attribute_const__
+static u8 __attribute_const__
mod255(u32 x)
{
x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
@@ -45,8 +43,7 @@ mod255(u32 x)
}
/* Fill the buffer with non-zero bytes. */
-static void __init
-fill_buf(char *buf, size_t len, u32 seed)
+static void fill_buf(char *buf, size_t len, u32 seed)
{
size_t i;
@@ -56,6 +53,50 @@ fill_buf(char *buf, size_t len, u32 seed)
}
}
+/* Holds most testing variables for the int test. */
+struct test_hash_params {
+ /* Pointer to integer to be hashed. */
+ unsigned long long *h64;
+ /* Low 32-bits of integer to be hashed. */
+ u32 h0;
+ /* Arch-specific hash result. */
+ u32 h1;
+ /* Generic hash result. */
+ u32 h2;
+ /* ORed hashes of given size (in bits). */
+ u32 (*hash_or)[33];
+};
+
+#ifdef HAVE_ARCH__HASH_32
+static void
+test_int__hash_32(struct kunit *test, struct test_hash_params *params)
+{
+ params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
+#if HAVE_ARCH__HASH_32 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+ params->h0, params->h1, params->h2);
+#endif
+}
+#endif
+
+#ifdef HAVE_ARCH_HASH_64
+static void
+test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
+{
+ params->h2 = hash_64_generic(*params->h64, *k);
+#if HAVE_ARCH_HASH_64 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
+ *params->h64, *k, params->h1, params->h2);
+#else
+ KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
+ "hash_64_generic(%#llx, %d) = %#x > %#x",
+ *params->h64, *k, params->h1, *m);
+#endif
+}
+#endif
+
/*
* Test the various integer hash functions. h64 (or its low-order bits)
* is the integer to hash. hash_or accumulates the OR of the hash values,
@@ -65,23 +106,16 @@ fill_buf(char *buf, size_t len, u32 seed)
* inline, the code being tested is actually in the module, and you can
* recompile and re-test the module without rebooting.
*/
-static bool __init
-test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+static void
+test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
{
int k;
- u32 h0 = (u32)h64, h1, h2;
+ struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
/* Test __hash32 */
- hash_or[0][0] |= h1 = __hash_32(h0);
+ hash_or[0][0] |= params.h1 = __hash_32(params.h0);
#ifdef HAVE_ARCH__HASH_32
- hash_or[1][0] |= h2 = __hash_32_generic(h0);
-#if HAVE_ARCH__HASH_32 == 1
- if (h1 != h2) {
- pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
- h0, h1, h2);
- return false;
- }
-#endif
+ test_int__hash_32(test, &params);
#endif
/* Test k = 1..32 bits */
@@ -89,63 +123,53 @@ test_int_hash(unsigned long long h64, u32 hash_or[2][33])
u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
/* Test hash_32 */
- hash_or[0][k] |= h1 = hash_32(h0, k);
- if (h1 > m) {
- pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
- return false;
- }
-#ifdef HAVE_ARCH_HASH_32
- h2 = hash_32_generic(h0, k);
-#if HAVE_ARCH_HASH_32 == 1
- if (h1 != h2) {
- pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
- " = %#x", h0, k, h1, h2);
- return false;
- }
-#else
- if (h2 > m) {
- pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
- h0, k, h1, m);
- return false;
- }
-#endif
-#endif
+ hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_32(%#x, %d) = %#x > %#x",
+ params.h0, k, params.h1, m);
+
/* Test hash_64 */
- hash_or[1][k] |= h1 = hash_64(h64, k);
- if (h1 > m) {
- pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
- return false;
- }
+ hash_or[1][k] |= params.h1 = hash_64(h64, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_64(%#llx, %d) = %#x > %#x",
+ h64, k, params.h1, m);
#ifdef HAVE_ARCH_HASH_64
- h2 = hash_64_generic(h64, k);
-#if HAVE_ARCH_HASH_64 == 1
- if (h1 != h2) {
- pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
- "= %#x", h64, k, h1, h2);
- return false;
- }
-#else
- if (h2 > m) {
- pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
- h64, k, h1, m);
- return false;
- }
-#endif
+ test_int_hash_64(test, &params, &m, &k);
#endif
}
-
- (void)h2; /* Suppress unused variable warning */
- return true;
}
#define SIZE 256 /* Run time is cubic in SIZE */
-static int __init
-test_hash_init(void)
+static void test_string_or(struct kunit *test)
{
char buf[SIZE+1];
- u32 string_or = 0, hash_or[2][33] = { { 0, } };
- unsigned tests = 0;
+ u32 string_or = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+ string_or |= h0;
+ } /* i */
+ } /* j */
+
+ /* The OR of all the hash values should cover all the bits */
+ KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
+ "OR of all string hash results = %#x != %#x",
+ string_or, -1u);
+}
+
+static void test_hash_or(struct kunit *test)
+{
+ char buf[SIZE+1];
+ u32 hash_or[2][33] = { { 0, } };
unsigned long long h64 = 0;
int i, j;
@@ -160,46 +184,27 @@ test_hash_init(void)
u32 h0 = full_name_hash(buf+i, buf+i, j-i);
/* Check that hashlen_string gets the length right */
- if (hashlen_len(hashlen) != j-i) {
- pr_err("hashlen_string(%d..%d) returned length"
- " %u, expected %d",
- i, j, hashlen_len(hashlen), j-i);
- return -EINVAL;
- }
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
+ "hashlen_string(%d..%d) returned length %u, expected %d",
+ i, j, hashlen_len(hashlen), j-i);
/* Check that the hashes match */
- if (hashlen_hash(hashlen) != h0) {
- pr_err("hashlen_string(%d..%d) = %08x != "
- "full_name_hash() = %08x",
- i, j, hashlen_hash(hashlen), h0);
- return -EINVAL;
- }
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
+ "hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
+ i, j, hashlen_hash(hashlen), h0);
- string_or |= h0;
h64 = h64 << 32 | h0; /* For use with hash_64 */
- if (!test_int_hash(h64, hash_or))
- return -EINVAL;
- tests++;
+ test_int_hash(test, h64, hash_or);
} /* i */
} /* j */
- /* The OR of all the hash values should cover all the bits */
- if (~string_or) {
- pr_err("OR of all string hash results = %#x != %#x",
- string_or, -1u);
- return -EINVAL;
- }
- if (~hash_or[0][0]) {
- pr_err("OR of all __hash_32 results = %#x != %#x",
- hash_or[0][0], -1u);
- return -EINVAL;
- }
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
+ "OR of all __hash_32 results = %#x != %#x",
+ hash_or[0][0], -1u);
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
- if (~hash_or[1][0]) {
- pr_err("OR of all __hash_32_generic results = %#x != %#x",
- hash_or[1][0], -1u);
- return -EINVAL;
- }
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
+ "OR of all __hash_32_generic results = %#x != %#x",
+ hash_or[1][0], -1u);
#endif
#endif
@@ -207,51 +212,27 @@ test_hash_init(void)
for (i = 1; i <= 32; i++) {
u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
- if (hash_or[0][i] != m) {
- pr_err("OR of all hash_32(%d) results = %#x "
- "(%#x expected)", i, hash_or[0][i], m);
- return -EINVAL;
- }
- if (hash_or[1][i] != m) {
- pr_err("OR of all hash_64(%d) results = %#x "
- "(%#x expected)", i, hash_or[1][i], m);
- return -EINVAL;
- }
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
+ "OR of all hash_32(%d) results = %#x (%#x expected)",
+ i, hash_or[0][i], m);
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
+ "OR of all hash_64(%d) results = %#x (%#x expected)",
+ i, hash_or[1][i], m);
}
+}
- /* Issue notices about skipped tests. */
-#ifdef HAVE_ARCH__HASH_32
-#if HAVE_ARCH__HASH_32 != 1
- pr_info("__hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
- pr_info("__hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_32
-#if HAVE_ARCH_HASH_32 != 1
- pr_info("hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
- pr_info("hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_64
-#if HAVE_ARCH_HASH_64 != 1
- pr_info("hash_64() is arch-specific; not compared to generic.");
-#endif
-#else
- pr_info("hash_64() has no arch implementation to test.");
-#endif
-
- pr_notice("%u tests passed.", tests);
+static struct kunit_case hash_test_cases[] __refdata = {
+ KUNIT_CASE(test_string_or),
+ KUNIT_CASE(test_hash_or),
+ {}
+};
- return 0;
-}
+static struct kunit_suite hash_test_suite = {
+ .name = "hash",
+ .test_cases = hash_test_cases,
+};
-static void __exit test_hash_exit(void)
-{
-}
-module_init(test_hash_init); /* Does everything */
-module_exit(test_hash_exit); /* Does nothing */
+kunit_test_suite(hash_test_suite);
MODULE_LICENSE("GPL");
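
The conversion above follows the standard KUnit pattern: each ad-hoc check becomes a KUNIT_EXPECT_*_MSG() expectation, the cases are collected in a kunit_case array, and kunit_test_suite() registers the suite in place of module_init()/module_exit(). A minimal, self-contained sketch of that skeleton (the suite and case names here are hypothetical, not part of the patch):

#include <kunit/test.h>
#include <linux/module.h>

static void example_case(struct kunit *test)
{
	/* An expectation records a failure but lets the test keep running. */
	KUNIT_EXPECT_EQ_MSG(test, 1 + 1, 2, "arithmetic is broken: %d", 1 + 1);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_case),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);

MODULE_LICENSE("GPL");
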
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 5144899d3c6b..b916801f23a8 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
static void __init test_hexdump_set(int rowsize, bool ascii)
{
size_t d = min_t(size_t, sizeof(data_b), rowsize);
- size_t len = get_random_int() % d + 1;
+ size_t len = get_random_u32_inclusive(1, d);
test_hexdump(len, rowsize, 4, ascii);
test_hexdump(len, rowsize, 2, ascii);
@@ -208,11 +208,11 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len,
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
unsigned int i = 0;
- int rs = (get_random_int() % 2 + 1) * 16;
+ int rs = get_random_u32_inclusive(1, 2) * 16;
do {
int gs = 1 << i;
- size_t len = get_random_int() % rs + gs;
+ size_t len = get_random_u32_below(rs) + gs;
test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
} while (i++ < 3);
@@ -223,11 +223,11 @@ static int __init test_hexdump_init(void)
unsigned int i;
int rowsize;
- rowsize = (get_random_int() % 2 + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, false);
- rowsize = (get_random_int() % 2 + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, true);
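
The hexdump hunks above replace open-coded "get_random_int() % n" arithmetic with the bounded helpers, which read more clearly and also avoid the slight modulo bias of the old idiom. A small sketch of the before/after pattern (the get_random_u32_* calls are the kernel API; the wrapper function is illustrative only):

#include <linux/random.h>
#include <linux/types.h>

static __maybe_unused size_t pick_len(size_t d, int rs, int gs)
{
	/* Was: get_random_int() % d + 1  -- a value in [1, d], with modulo bias. */
	size_t len = get_random_u32_inclusive(1, d);

	/* Was: get_random_int() % rs + gs -- a value in [gs, gs + rs - 1]. */
	size_t len2 = get_random_u32_below(rs) + gs;

	return len + len2;
}
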
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
new file mode 100644
index 000000000000..717dcb830127
--- /dev/null
+++ b/lib/test_hmm.c
@@ -0,0 +1,1553 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management)
+ * mirror and zone device private memory migration APIs of the kernel.
+ * Userspace programs can register with the driver to mirror their own address
+ * space and can use the device to read/write any valid virtual address.
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/memremap.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/hmm.h>
+#include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/sched/mm.h>
+#include <linux/platform_device.h>
+#include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
+
+#include "test_hmm_uapi.h"
+
+#define DMIRROR_NDEVICES 4
+#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
+#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
+#define DEVMEM_CHUNKS_RESERVE 16
+
+/*
+ * For device_private pages, dpage is just a dummy struct page
+ * representing a piece of device memory. dmirror_devmem_alloc_page
+ * allocates a real system memory page as backing storage to fake a
+ * real device. zone_device_data points to that backing page. But
+ * for device_coherent memory, the struct page represents real
+ * physical CPU-accessible memory that we can use directly.
+ */
+#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
+ (page)->zone_device_data : (page))
+
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+ "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+ "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static const struct dev_pagemap_ops dmirror_devmem_ops;
+static const struct mmu_interval_notifier_ops dmirror_min_ops;
+static dev_t dmirror_dev;
+
+struct dmirror_device;
+
+struct dmirror_bounce {
+ void *ptr;
+ unsigned long size;
+ unsigned long addr;
+ unsigned long cpages;
+};
+
+#define DPT_XA_TAG_ATOMIC 1UL
+#define DPT_XA_TAG_WRITE 3UL
+
+/*
+ * Data structure to track address ranges and register for mmu interval
+ * notifier updates.
+ */
+struct dmirror_interval {
+ struct mmu_interval_notifier notifier;
+ struct dmirror *dmirror;
+};
+
+/*
+ * Data attached to the open device file.
+ * Note that it might be shared after a fork().
+ */
+struct dmirror {
+ struct dmirror_device *mdevice;
+ struct xarray pt;
+ struct mmu_interval_notifier notifier;
+ struct mutex mutex;
+};
+
+/*
+ * ZONE_DEVICE pages for migration and simulating device memory.
+ */
+struct dmirror_chunk {
+ struct dev_pagemap pagemap;
+ struct dmirror_device *mdevice;
+ bool remove;
+};
+
+/*
+ * Per device data.
+ */
+struct dmirror_device {
+ struct cdev cdevice;
+ unsigned int zone_device_type;
+ struct device device;
+
+ unsigned int devmem_capacity;
+ unsigned int devmem_count;
+ struct dmirror_chunk **devmem_chunks;
+ struct mutex devmem_lock; /* protects the above */
+
+ unsigned long calloc;
+ unsigned long cfree;
+ struct page *free_pages;
+ spinlock_t lock; /* protects the above */
+};
+
+static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];
+
+static int dmirror_bounce_init(struct dmirror_bounce *bounce,
+ unsigned long addr,
+ unsigned long size)
+{
+ bounce->addr = addr;
+ bounce->size = size;
+ bounce->cpages = 0;
+ bounce->ptr = vmalloc(size);
+ if (!bounce->ptr)
+ return -ENOMEM;
+ return 0;
+}
+
+static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
+{
+ return (mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false;
+}
+
+static enum migrate_vma_direction
+dmirror_select_device(struct dmirror *dmirror)
+{
+ return (dmirror->mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ?
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE :
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+}
+
+static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
+{
+ vfree(bounce->ptr);
+}
+
+static int dmirror_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct dmirror *dmirror;
+ int ret;
+
+ /* Mirror this process address space */
+ dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
+ if (dmirror == NULL)
+ return -ENOMEM;
+
+ dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
+ mutex_init(&dmirror->mutex);
+ xa_init(&dmirror->pt);
+
+ ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
+ 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);
+ if (ret) {
+ kfree(dmirror);
+ return ret;
+ }
+
+ filp->private_data = dmirror;
+ return 0;
+}
+
+static int dmirror_fops_release(struct inode *inode, struct file *filp)
+{
+ struct dmirror *dmirror = filp->private_data;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+}
+
+static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
+{
+ return container_of(page->pgmap, struct dmirror_chunk, pagemap);
+}
+
+static struct dmirror_device *dmirror_page_to_device(struct page *page)
+
+{
+ return dmirror_page_to_chunk(page)->mdevice;
+}
+
+static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
+{
+ unsigned long *pfns = range->hmm_pfns;
+ unsigned long pfn;
+
+ for (pfn = (range->start >> PAGE_SHIFT);
+ pfn < (range->end >> PAGE_SHIFT);
+ pfn++, pfns++) {
+ struct page *page;
+ void *entry;
+
+ /*
+ * Since we asked for hmm_range_fault() to populate pages,
+ * it shouldn't return an error entry on success.
+ */
+ WARN_ON(*pfns & HMM_PFN_ERROR);
+ WARN_ON(!(*pfns & HMM_PFN_VALID));
+
+ page = hmm_pfn_to_page(*pfns);
+ WARN_ON(!page);
+
+ entry = page;
+ if (*pfns & HMM_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
+ return -EFAULT;
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry))
+ return xa_err(entry);
+ }
+
+ return 0;
+}
+
+static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+ void *entry;
+
+ /*
+ * The XArray doesn't hold references to pages since it relies on
+ * the mmu notifier to clear page pointers when they become stale.
+ * Therefore, it is OK to just clear the entry.
+ */
+ xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT)
+ xa_erase(&dmirror->pt, pfn);
+}
+
+static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+
+ /*
+ * Ignore invalidation callbacks for device private pages since
+ * the invalidation is handled as part of the migration process.
+ */
+ if (range->event == MMU_NOTIFY_MIGRATE &&
+ range->owner == dmirror->mdevice)
+ return true;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ mmu_interval_set_seq(mni, cur_seq);
+ dmirror_do_update(dmirror, range->start, range->end);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_min_ops = {
+ .invalidate = dmirror_interval_invalidate,
+};
+
+static int dmirror_range_fault(struct dmirror *dmirror,
+ struct hmm_range *range)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ int ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ ret = dmirror_do_fault(dmirror, range);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ return ret;
+}
+
+static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, bool write)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long addr;
+ unsigned long pfns[64];
+ struct hmm_range range = {
+ .notifier = &dmirror->notifier,
+ .hmm_pfns = pfns,
+ .pfn_flags_mask = 0,
+ .default_flags =
+ HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0),
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return 0;
+
+ for (addr = start; addr < end; addr = range.end) {
+ range.start = addr;
+ range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+
+ ret = dmirror_range_fault(dmirror, &range);
+ if (ret)
+ break;
+ }
+
+ mmput(mm);
+ return ret;
+}
+
+static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page)
+ return -ENOENT;
+
+ memcpy_from_page(ptr, page, 0, PAGE_SIZE);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, false);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
+ return -ENOENT;
+
+ memcpy_to_page(page, 0, ptr, PAGE_SIZE);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr),
+ bounce.size)) {
+ ret = -EFAULT;
+ goto fini;
+ }
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_write(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, true);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+fini:
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
+ struct page **ppage)
+{
+ struct dmirror_chunk *devmem;
+ struct resource *res = NULL;
+ unsigned long pfn;
+ unsigned long pfn_first;
+ unsigned long pfn_last;
+ void *ptr;
+ int ret = -ENOMEM;
+
+ devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return ret;
+
+ switch (mdevice->zone_device_type) {
+ case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR_OR_NULL(res))
+ goto err_devmem;
+ devmem->pagemap.range.start = res->start;
+ devmem->pagemap.range.end = res->end;
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ break;
+ case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+ devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+ spm_addr_dev0 :
+ spm_addr_dev1;
+ devmem->pagemap.range.end = devmem->pagemap.range.start +
+ DEVMEM_CHUNK_SIZE - 1;
+ devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_devmem;
+ }
+
+ devmem->pagemap.nr_range = 1;
+ devmem->pagemap.ops = &dmirror_devmem_ops;
+ devmem->pagemap.owner = mdevice;
+
+ mutex_lock(&mdevice->devmem_lock);
+
+ if (mdevice->devmem_count == mdevice->devmem_capacity) {
+ struct dmirror_chunk **new_chunks;
+ unsigned int new_capacity;
+
+ new_capacity = mdevice->devmem_capacity +
+ DEVMEM_CHUNKS_RESERVE;
+ new_chunks = krealloc(mdevice->devmem_chunks,
+ sizeof(new_chunks[0]) * new_capacity,
+ GFP_KERNEL);
+ if (!new_chunks)
+ goto err_release;
+ mdevice->devmem_capacity = new_capacity;
+ mdevice->devmem_chunks = new_chunks;
+ }
+ ptr = memremap_pages(&devmem->pagemap, numa_node_id());
+ if (IS_ERR_OR_NULL(ptr)) {
+ if (ptr)
+ ret = PTR_ERR(ptr);
+ else
+ ret = -EFAULT;
+ goto err_release;
+ }
+
+ devmem->mdevice = mdevice;
+ pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+ pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
+ mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
+
+ mutex_unlock(&mdevice->devmem_lock);
+
+ pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n",
+ DEVMEM_CHUNK_SIZE / (1024 * 1024),
+ mdevice->devmem_count,
+ mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)),
+ pfn_first, pfn_last);
+
+ spin_lock(&mdevice->lock);
+ for (pfn = pfn_first; pfn < pfn_last; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
+ if (ppage) {
+ *ppage = mdevice->free_pages;
+ mdevice->free_pages = (*ppage)->zone_device_data;
+ mdevice->calloc++;
+ }
+ spin_unlock(&mdevice->lock);
+
+ return 0;
+
+err_release:
+ mutex_unlock(&mdevice->devmem_lock);
+ if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+err_devmem:
+ kfree(devmem);
+
+ return ret;
+}
+
+static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
+{
+ struct page *dpage = NULL;
+ struct page *rpage = NULL;
+
+ /*
+ * For ZONE_DEVICE private type, this is a fake device so we allocate
+ * real system memory to store our device memory.
+ * For ZONE_DEVICE coherent type we use the actual dpage to store the
+ * data and ignore rpage.
+ */
+ if (dmirror_is_private_zone(mdevice)) {
+ rpage = alloc_page(GFP_HIGHUSER);
+ if (!rpage)
+ return NULL;
+ }
+ spin_lock(&mdevice->lock);
+
+ if (mdevice->free_pages) {
+ dpage = mdevice->free_pages;
+ mdevice->free_pages = dpage->zone_device_data;
+ mdevice->calloc++;
+ spin_unlock(&mdevice->lock);
+ } else {
+ spin_unlock(&mdevice->lock);
+ if (dmirror_allocate_chunk(mdevice, &dpage))
+ goto error;
+ }
+
+ zone_device_page_init(dpage);
+ dpage->zone_device_data = rpage;
+ return dpage;
+
+error:
+ if (rpage)
+ __free_page(rpage);
+ return NULL;
+}
+
+static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long addr;
+
+ for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *spage;
+ struct page *dpage;
+ struct page *rpage;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ /*
+ * Note that spage might be NULL which is OK since it is an
+ * unallocated pte_none() or read-only zero page.
+ */
+ spage = migrate_pfn_to_page(*src);
+ if (WARN(spage && is_zone_device_page(spage),
+ "page already in device spage pfn: 0x%lx\n",
+ page_to_pfn(spage)))
+ continue;
+
+ dpage = dmirror_devmem_alloc_page(mdevice);
+ if (!dpage)
+ continue;
+
+ rpage = BACKING_PAGE(dpage);
+ if (spage)
+ copy_highpage(rpage, spage);
+ else
+ clear_highpage(rpage);
+
+ /*
+ * Normally, a device would use the page->zone_device_data to
+ * point to the mirror but here we use it to hold the page for
+ * the simulated device memory and that page holds the pointer
+ * to the mirror.
+ */
+ rpage->zone_device_data = dmirror;
+
+ pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+ *dst = migrate_pfn(page_to_pfn(dpage));
+ if ((*src & MIGRATE_PFN_WRITE) ||
+ (!spage && args->vma->vm_flags & VM_WRITE))
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+}
+
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long start, unsigned long end,
+ struct page **pages, struct dmirror *dmirror)
+{
+ unsigned long pfn, mapped = 0;
+ int i;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
+ void *entry;
+
+ if (!pages[i])
+ continue;
+
+ entry = pages[i];
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mapped++;
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return mapped;
+}
+
+static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ const unsigned long *src = args->src;
+ const unsigned long *dst = args->dst;
+ unsigned long pfn;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
+ src++, dst++) {
+ struct page *dpage;
+ void *entry;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = migrate_pfn_to_page(*dst);
+ if (!dpage)
+ continue;
+
+ entry = BACKING_PAGE(dpage);
+ if (*dst & MIGRATE_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return 0;
+}
+
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct page *pages[64];
+ struct dmirror_bounce bounce;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long mapped = 0;
+ int i;
+
+ if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
+ next = end;
+ else
+ next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+
+ ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
+ /*
+ * Do dmirror_atomic_map() iff all pages are marked for
+ * exclusive access to avoid accessing uninitialized
+ * fields of pages.
+ */
+ if (ret == (next - addr) >> PAGE_SHIFT)
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ if (addr + (mapped << PAGE_SHIFT) < next) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return -EBUSY;
+ }
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(*src);
+ if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ if (!dpage)
+ continue;
+ pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+
+ lock_page(dpage);
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+ copy_highpage(dpage, spage);
+ *dst = migrate_pfn(page_to_pfn(dpage));
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+ return 0;
+}
+
+static unsigned long
+dmirror_successful_migrated_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
+static int dmirror_migrate_to_system(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
+ struct migrate_vma args = { 0 };
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ cmd->cpages = 0;
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from device mem to sys mem\n");
+ dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+
+ migrate_vma_pages(&args);
+ cmd->cpages += dmirror_successful_migrated_pages(&args);
+ migrate_vma_finalize(&args);
+ }
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return ret;
+}
+
+static int dmirror_migrate_to_device(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
+ struct dmirror_bounce bounce;
+ struct migrate_vma args = { 0 };
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = MIGRATE_VMA_SELECT_SYSTEM;
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from sys mem to device mem\n");
+ dmirror_migrate_alloc_and_copy(&args, dmirror);
+ migrate_vma_pages(&args);
+ dmirror_migrate_finalize_and_map(&args, dmirror);
+ migrate_vma_finalize(&args);
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /*
+ * Return the migrated data for verification.
+ * Only for pages in device zone
+ */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return ret;
+}
+
+static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
+ unsigned char *perm, unsigned long entry)
+{
+ struct page *page;
+
+ if (entry & HMM_PFN_ERROR) {
+ *perm = HMM_DMIRROR_PROT_ERROR;
+ return;
+ }
+ if (!(entry & HMM_PFN_VALID)) {
+ *perm = HMM_DMIRROR_PROT_NONE;
+ return;
+ }
+
+ page = hmm_pfn_to_page(entry);
+ if (is_device_private_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
+ } else if (is_device_coherent_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE;
+ } else if (is_zero_pfn(page_to_pfn(page)))
+ *perm = HMM_DMIRROR_PROT_ZERO;
+ else
+ *perm = HMM_DMIRROR_PROT_NONE;
+ if (entry & HMM_PFN_WRITE)
+ *perm |= HMM_DMIRROR_PROT_WRITE;
+ else
+ *perm |= HMM_DMIRROR_PROT_READ;
+ if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PMD;
+ else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PUD;
+}
+
+static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror_interval *dmi =
+ container_of(mni, struct dmirror_interval, notifier);
+ struct dmirror *dmirror = dmi->dmirror;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ /*
+ * Snapshots only need to set the sequence number since any
+ * invalidation in the interval invalidates the whole snapshot.
+ */
+ mmu_interval_set_seq(mni, cur_seq);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
+ .invalidate = dmirror_snapshot_invalidate,
+};
+
+static int dmirror_range_snapshot(struct dmirror *dmirror,
+ struct hmm_range *range,
+ unsigned char *perm)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct dmirror_interval notifier;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i;
+ unsigned long n;
+ int ret = 0;
+
+ notifier.dmirror = dmirror;
+ range->notifier = &notifier.notifier;
+
+ ret = mmu_interval_notifier_insert(range->notifier, mm,
+ range->start, range->end - range->start,
+ &dmirror_mrn_ops);
+ if (ret)
+ return ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ n = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < n; i++)
+ dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ mmu_interval_notifier_remove(range->notifier);
+ return ret;
+}
+
+static int dmirror_snapshot(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ unsigned long addr;
+ unsigned long next;
+ unsigned long pfns[64];
+ unsigned char perm[64];
+ char __user *uptr;
+ struct hmm_range range = {
+ .hmm_pfns = pfns,
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ /*
+ * Register a temporary notifier to detect invalidations even if it
+ * overlaps with other mmu_interval_notifiers.
+ */
+ uptr = u64_to_user_ptr(cmd->ptr);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long n;
+
+ next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+ range.start = addr;
+ range.end = next;
+
+ ret = dmirror_range_snapshot(dmirror, &range, perm);
+ if (ret)
+ break;
+
+ n = (range.end - range.start) >> PAGE_SHIFT;
+ if (copy_to_user(uptr, perm, n)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ cmd->cpages += n;
+ uptr += n;
+ }
+ mmput(mm);
+
+ return ret;
+}
+
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ lock_page(dpage);
+ copy_highpage(dpage, spage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+}
+
+/* Removes free pages from the free list so they can't be re-allocated */
+static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+{
+ struct dmirror_device *mdevice = devmem->mdevice;
+ struct page *page;
+
+ for (page = mdevice->free_pages; page; page = page->zone_device_data)
+ if (dmirror_page_to_chunk(page) == devmem)
+ mdevice->free_pages = page->zone_device_data;
+}
+
+static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
+{
+ unsigned int i;
+
+ mutex_lock(&mdevice->devmem_lock);
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ spin_lock(&mdevice->lock);
+ devmem->remove = true;
+ dmirror_remove_free_pages(devmem);
+ spin_unlock(&mdevice->lock);
+
+ dmirror_device_evict_chunk(devmem);
+ memunmap_pages(&devmem->pagemap);
+ if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+ kfree(devmem);
+ }
+ mdevice->devmem_count = 0;
+ mdevice->devmem_capacity = 0;
+ mdevice->free_pages = NULL;
+ kfree(mdevice->devmem_chunks);
+ mdevice->devmem_chunks = NULL;
+ }
+ mutex_unlock(&mdevice->devmem_lock);
+}
+
+static long dmirror_fops_unlocked_ioctl(struct file *filp,
+ unsigned int command,
+ unsigned long arg)
+{
+ void __user *uarg = (void __user *)arg;
+ struct hmm_dmirror_cmd cmd;
+ struct dmirror *dmirror;
+ int ret;
+
+ dmirror = filp->private_data;
+ if (!dmirror)
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, uarg, sizeof(cmd)))
+ return -EFAULT;
+
+ if (cmd.addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT)))
+ return -EINVAL;
+
+ cmd.cpages = 0;
+ cmd.faults = 0;
+
+ switch (command) {
+ case HMM_DMIRROR_READ:
+ ret = dmirror_read(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_WRITE:
+ ret = dmirror_write(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_DEV:
+ ret = dmirror_migrate_to_device(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_SYS:
+ ret = dmirror_migrate_to_system(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
+ break;
+
+ case HMM_DMIRROR_SNAPSHOT:
+ ret = dmirror_snapshot(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_RELEASE:
+ dmirror_device_remove_chunks(dmirror->mdevice);
+ ret = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ if (copy_to_user(uarg, &cmd, sizeof(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+
+ ret = vm_insert_page(vma, addr, page);
+ if (ret) {
+ __free_page(page);
+ return ret;
+ }
+ put_page(page);
+ }
+
+ return 0;
+}
+
+static const struct file_operations dmirror_fops = {
+ .open = dmirror_fops_open,
+ .release = dmirror_fops_release,
+ .mmap = dmirror_fops_mmap,
+ .unlocked_ioctl = dmirror_fops_unlocked_ioctl,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static void dmirror_devmem_free(struct page *page)
+{
+ struct page *rpage = BACKING_PAGE(page);
+ struct dmirror_device *mdevice;
+
+ if (rpage != page)
+ __free_page(rpage);
+
+ mdevice = dmirror_page_to_device(page);
+ spin_lock(&mdevice->lock);
+
+ /* Return page to our allocator if not freeing the chunk */
+ if (!dmirror_page_to_chunk(page)->remove) {
+ mdevice->cfree++;
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
+ spin_unlock(&mdevice->lock);
+}
+
+static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
+{
+ struct migrate_vma args = { 0 };
+ unsigned long src_pfns = 0;
+ unsigned long dst_pfns = 0;
+ struct page *rpage;
+ struct dmirror *dmirror;
+ vm_fault_t ret;
+
+ /*
+ * Normally, a device would use the page->zone_device_data to point to
+ * the mirror but here we use it to hold the page for the simulated
+ * device memory and that page holds the pointer to the mirror.
+ */
+ rpage = vmf->page->zone_device_data;
+ dmirror = rpage->zone_device_data;
+
+ /* FIXME demonstrate how we can adjust migrate range */
+ args.vma = vmf->vma;
+ args.start = vmf->address;
+ args.end = args.start + PAGE_SIZE;
+ args.src = &src_pfns;
+ args.dst = &dst_pfns;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+ args.fault_page = vmf->page;
+
+ if (migrate_vma_setup(&args))
+ return VM_FAULT_SIGBUS;
+
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+ if (ret)
+ return ret;
+ migrate_vma_pages(&args);
+ /*
+ * No device finalize step is needed since
+ * dmirror_devmem_fault_alloc_and_copy() will have already
+ * invalidated the device page table.
+ */
+ migrate_vma_finalize(&args);
+ return 0;
+}
+
+static const struct dev_pagemap_ops dmirror_devmem_ops = {
+ .page_free = dmirror_devmem_free,
+ .migrate_to_ram = dmirror_devmem_fault,
+};
+
+static int dmirror_device_init(struct dmirror_device *mdevice, int id)
+{
+ dev_t dev;
+ int ret;
+
+ dev = MKDEV(MAJOR(dmirror_dev), id);
+ mutex_init(&mdevice->devmem_lock);
+ spin_lock_init(&mdevice->lock);
+
+ cdev_init(&mdevice->cdevice, &dmirror_fops);
+ mdevice->cdevice.owner = THIS_MODULE;
+ device_initialize(&mdevice->device);
+ mdevice->device.devt = dev;
+
+ ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id);
+ if (ret)
+ return ret;
+
+ ret = cdev_device_add(&mdevice->cdevice, &mdevice->device);
+ if (ret)
+ return ret;
+
+ /* Build a list of free ZONE_DEVICE struct pages */
+ return dmirror_allocate_chunk(mdevice, NULL);
+}
+
+static void dmirror_device_remove(struct dmirror_device *mdevice)
+{
+ dmirror_device_remove_chunks(mdevice);
+ cdev_device_del(&mdevice->cdevice, &mdevice->device);
+}
+
+static int __init hmm_dmirror_init(void)
+{
+ int ret;
+ int id = 0;
+ int ndevices = 0;
+
+ ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
+ "HMM_DMIRROR");
+ if (ret)
+ goto err_unreg;
+
+ memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ if (spm_addr_dev0 && spm_addr_dev1) {
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ }
+ for (id = 0; id < ndevices; id++) {
+ ret = dmirror_device_init(dmirror_devices + id, id);
+ if (ret)
+ goto err_chrdev;
+ }
+
+ pr_info("HMM test module loaded. This is only for testing HMM.\n");
+ return 0;
+
+err_chrdev:
+ while (--id >= 0)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+err_unreg:
+ return ret;
+}
+
+static void __exit hmm_dmirror_exit(void)
+{
+ int id;
+
+ for (id = 0; id < DMIRROR_NDEVICES; id++)
+ if (dmirror_devices[id].zone_device_type)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+}
+
+module_init(hmm_dmirror_init);
+module_exit(hmm_dmirror_exit);
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
new file mode 100644
index 000000000000..8c818a2cf4f6
--- /dev/null
+++ b/lib/test_hmm_uapi.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management) API
+ * of the kernel. It allows a userspace program to expose its entire address
+ * space through the HMM test module device file.
+ */
+#ifndef _LIB_TEST_HMM_UAPI_H
+#define _LIB_TEST_HMM_UAPI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Structure to pass to the HMM test driver to mimic a device accessing
+ * system memory and ZONE_DEVICE private memory through device page tables.
+ *
+ * @addr: (in) user address the device will read/write
+ * @ptr: (in) user address where device data is copied to/from
+ * @npages: (in) number of pages to read/write
+ * @cpages: (out) number of pages copied
+ * @faults: (out) number of device page faults seen
+ */
+struct hmm_dmirror_cmd {
+ __u64 addr;
+ __u64 ptr;
+ __u64 npages;
+ __u64 cpages;
+ __u64 faults;
+};
+
+/* Expose the address space of the calling process through hmm device file */
+#define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_DEV _IOWR('H', 0x02, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_SYS _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd)
+
+/*
+ * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
+ * HMM_DMIRROR_PROT_ERROR: no valid mirror PTE for this page
+ * HMM_DMIRROR_PROT_NONE: unpopulated PTE or PTE with no access
+ * HMM_DMIRROR_PROT_READ: read-only PTE
+ * HMM_DMIRROR_PROT_WRITE: read/write PTE
+ * HMM_DMIRROR_PROT_PMD: PMD sized page is fully mapped by same permissions
+ * HMM_DMIRROR_PROT_PUD: PUD sized page is fully mapped by same permissions
+ * HMM_DMIRROR_PROT_ZERO: special read-only zero page
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL: Migrated device private page on the
+ * device the ioctl() is made
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some
+ * other device
+ * HMM_DMIRROR_PROT_DEV_COHERENT: Migrated device coherent page on the device
+ * the ioctl() is made
+ */
+enum {
+ HMM_DMIRROR_PROT_ERROR = 0xFF,
+ HMM_DMIRROR_PROT_NONE = 0x00,
+ HMM_DMIRROR_PROT_READ = 0x01,
+ HMM_DMIRROR_PROT_WRITE = 0x02,
+ HMM_DMIRROR_PROT_PMD = 0x04,
+ HMM_DMIRROR_PROT_PUD = 0x08,
+ HMM_DMIRROR_PROT_ZERO = 0x10,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30,
+ HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL = 0x40,
+ HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE = 0x50,
+};
+
+enum {
+ /* 0 is reserved to catch uninitialized type fields */
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
+};
+
+#endif /* _LIB_TEST_HMM_UAPI_H */
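
A hedged user-space sketch of driving the mirror device with the UAPI above. The structure and ioctl numbers come from this header; the /dev/hmm_dmirror0 node name is an assumption based on the "hmm_dmirror%u" device name the driver registers, and error handling/cleanup are trimmed for brevity.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "test_hmm_uapi.h"

int main(void)
{
	unsigned long pagesize = sysconf(_SC_PAGESIZE);
	struct hmm_dmirror_cmd cmd;
	unsigned char *buf;
	int fd;

	/* Node name assumed from the driver's "hmm_dmirror%u" registration. */
	fd = open("/dev/hmm_dmirror0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One page of ordinary anonymous memory to be mirrored (must be page aligned). */
	buf = aligned_alloc(pagesize, pagesize);
	memset(buf, 0xaa, pagesize);

	/* Ask the fake device to read one page from our address space. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr = (unsigned long)buf;
	cmd.ptr = (unsigned long)malloc(pagesize);
	cmd.npages = 1;
	if (ioctl(fd, HMM_DMIRROR_READ, &cmd) < 0)
		perror("HMM_DMIRROR_READ");
	else
		printf("copied %llu page(s), %llu device fault(s)\n",
		       (unsigned long long)cmd.cpages,
		       (unsigned long long)cmd.faults);

	close(fd);
	return 0;
}
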
diff --git a/lib/test_ida.c b/lib/test_ida.c
index b06880625961..072a49897e71 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -13,7 +13,7 @@ static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
-void ida_dump(struct ida *ida) { }
+static void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+ unsigned long i;
+
+ printk("vvv Ignore \"not allocated\" warnings\n");
+ /* IDA is empty; all of these will fail */
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+ printk("^^^ \"not allocated\" warnings over\n");
+
+ ida_free(ida, 3);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -162,6 +201,7 @@ static int ida_checks(void)
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
+ ida_check_bad_free(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
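
The new ida_check_bad_free() case deliberately frees IDs that were never allocated, checking that the IDA only warns and remains consistent (it must still be empty once the three real allocations are freed). For contrast, a minimal sketch of the intended allocate/free pairing; the function and ida names here are hypothetical:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int __maybe_unused example_ida_usage(void)
{
	/* Allocate the smallest free ID (>= 0). */
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;

	/* ... use id ... */

	/* Free only IDs that this code actually allocated. */
	ida_free(&example_ida, id);
	return 0;
}
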
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
deleted file mode 100644
index e3c593c38eff..000000000000
--- a/lib/test_kasan.c
+++ /dev/null
@@ -1,670 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
- */
-
-#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
-
-/*
- * Note: test functions are marked noinline so that their names appear in
- * reports.
- */
-
-static noinline void __init kmalloc_oob_right(void)
-{
- char *ptr;
- size_t size = 123;
-
- pr_info("out-of-bounds to right\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size] = 'x';
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_oob_left(void)
-{
- char *ptr;
- size_t size = 15;
-
- pr_info("out-of-bounds to left\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- *ptr = *(ptr - 1);
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_node_oob_right(void)
-{
- char *ptr;
- size_t size = 4096;
-
- pr_info("kmalloc_node(): out-of-bounds to right\n");
- ptr = kmalloc_node(size, GFP_KERNEL, 0);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size] = 0;
- kfree(ptr);
-}
-
-#ifdef CONFIG_SLUB
-static noinline void __init kmalloc_pagealloc_oob_right(void)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
-
- /* Allocate a chunk that does not fit into a SLUB cache to trigger
- * the page allocator fallback.
- */
- pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size] = 0;
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_pagealloc_uaf(void)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
-
- pr_info("kmalloc pagealloc allocation: use-after-free\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- kfree(ptr);
- ptr[0] = 0;
-}
-
-static noinline void __init kmalloc_pagealloc_invalid_free(void)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
-
- pr_info("kmalloc pagealloc allocation: invalid-free\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- kfree(ptr + 1);
-}
-#endif
-
-static noinline void __init kmalloc_large_oob_right(void)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
- /* Allocate a chunk that is large enough, but still fits into a slab
- * and does not trigger the page allocator fallback in SLUB.
- */
- pr_info("kmalloc large allocation: out-of-bounds to right\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr[size] = 0;
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_oob_krealloc_more(void)
-{
- char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 19;
-
- pr_info("out-of-bounds after krealloc more\n");
- ptr1 = kmalloc(size1, GFP_KERNEL);
- ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- return;
- }
-
- ptr2[size2] = 'x';
- kfree(ptr2);
-}
-
-static noinline void __init kmalloc_oob_krealloc_less(void)
-{
- char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 15;
-
- pr_info("out-of-bounds after krealloc less\n");
- ptr1 = kmalloc(size1, GFP_KERNEL);
- ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- return;
- }
- ptr2[size2] = 'x';
- kfree(ptr2);
-}
-
-static noinline void __init kmalloc_oob_16(void)
-{
- struct {
- u64 words[2];
- } *ptr1, *ptr2;
-
- pr_info("kmalloc out-of-bounds for 16-bytes access\n");
- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
- ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
- if (!ptr1 || !ptr2) {
- pr_err("Allocation failed\n");
- kfree(ptr1);
- kfree(ptr2);
- return;
- }
- *ptr1 = *ptr2;
- kfree(ptr1);
- kfree(ptr2);
-}
-
-static noinline void __init kmalloc_oob_memset_2(void)
-{
- char *ptr;
- size_t size = 8;
-
- pr_info("out-of-bounds in memset2\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr+7, 0, 2);
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_oob_memset_4(void)
-{
- char *ptr;
- size_t size = 8;
-
- pr_info("out-of-bounds in memset4\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr+5, 0, 4);
- kfree(ptr);
-}
-
-
-static noinline void __init kmalloc_oob_memset_8(void)
-{
- char *ptr;
- size_t size = 8;
-
- pr_info("out-of-bounds in memset8\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr+1, 0, 8);
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_oob_memset_16(void)
-{
- char *ptr;
- size_t size = 16;
-
- pr_info("out-of-bounds in memset16\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr+1, 0, 16);
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_oob_in_memset(void)
-{
- char *ptr;
- size_t size = 666;
-
- pr_info("out-of-bounds in memset\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- memset(ptr, 0, size+5);
- kfree(ptr);
-}
-
-static noinline void __init kmalloc_uaf(void)
-{
- char *ptr;
- size_t size = 10;
-
- pr_info("use-after-free\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- kfree(ptr);
- *(ptr + 8) = 'x';
-}
-
-static noinline void __init kmalloc_uaf_memset(void)
-{
- char *ptr;
- size_t size = 33;
-
- pr_info("use-after-free in memset\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
-
- kfree(ptr);
- memset(ptr, 0, size);
-}
-
-static noinline void __init kmalloc_uaf2(void)
-{
- char *ptr1, *ptr2;
- size_t size = 43;
-
- pr_info("use-after-free after another kmalloc\n");
- ptr1 = kmalloc(size, GFP_KERNEL);
- if (!ptr1) {
- pr_err("Allocation failed\n");
- return;
- }
-
- kfree(ptr1);
- ptr2 = kmalloc(size, GFP_KERNEL);
- if (!ptr2) {
- pr_err("Allocation failed\n");
- return;
- }
-
- ptr1[40] = 'x';
- if (ptr1 == ptr2)
- pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
- kfree(ptr2);
-}
-
-static noinline void __init kmem_cache_oob(void)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache = kmem_cache_create("test_cache",
- size, 0,
- 0, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("out-of-bounds in kmem_cache_alloc\n");
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- pr_err("Allocation failed\n");
- kmem_cache_destroy(cache);
- return;
- }
-
- *p = p[size];
- kmem_cache_free(cache, p);
- kmem_cache_destroy(cache);
-}
-
-static noinline void __init memcg_accounted_kmem_cache(void)
-{
- int i;
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
-
- pr_info("allocate memcg accounted object\n");
- /*
- * Several allocations with a delay to allow for lazy per memcg kmem
- * cache creation.
- */
- for (i = 0; i < 5; i++) {
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p)
- goto free_cache;
-
- kmem_cache_free(cache, p);
- msleep(100);
- }
-
-free_cache:
- kmem_cache_destroy(cache);
-}
-
-static char global_array[10];
-
-static noinline void __init kasan_global_oob(void)
-{
- volatile int i = 3;
- char *p = &global_array[ARRAY_SIZE(global_array) + i];
-
- pr_info("out-of-bounds global variable\n");
- *(volatile char *)p;
-}
-
-static noinline void __init kasan_stack_oob(void)
-{
- char stack_array[10];
- volatile int i = 0;
- char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
-
- pr_info("out-of-bounds on stack\n");
- *(volatile char *)p;
-}
-
-static noinline void __init ksize_unpoisons_memory(void)
-{
- char *ptr;
- size_t size = 123, real_size;
-
- pr_info("ksize() unpoisons the whole allocated chunk\n");
- ptr = kmalloc(size, GFP_KERNEL);
- if (!ptr) {
- pr_err("Allocation failed\n");
- return;
- }
- real_size = ksize(ptr);
- /* This access doesn't trigger an error. */
- ptr[size] = 'x';
- /* This one does. */
- ptr[real_size] = 'y';
- kfree(ptr);
-}
-
-static noinline void __init copy_user_test(void)
-{
- char *kmem;
- char __user *usermem;
- size_t size = 10;
- int unused;
-
- kmem = kmalloc(size, GFP_KERNEL);
- if (!kmem)
- return;
-
- usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (IS_ERR(usermem)) {
- pr_err("Failed to allocate user memory\n");
- kfree(kmem);
- return;
- }
-
- pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1);
-
- pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1);
-
- pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1);
-
- pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1);
-
- pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
-
- pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
-
- pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1);
-
- vm_munmap((unsigned long)usermem, PAGE_SIZE);
- kfree(kmem);
-}
-
-static noinline void __init kasan_alloca_oob_left(void)
-{
- volatile int i = 10;
- char alloca_array[i];
- char *p = alloca_array - 1;
-
- pr_info("out-of-bounds to left on alloca\n");
- *(volatile char *)p;
-}
-
-static noinline void __init kasan_alloca_oob_right(void)
-{
- volatile int i = 10;
- char alloca_array[i];
- char *p = alloca_array + i;
-
- pr_info("out-of-bounds to right on alloca\n");
- *(volatile char *)p;
-}
-
-static noinline void __init kmem_cache_double_free(void)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("double-free on heap object\n");
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- pr_err("Allocation failed\n");
- kmem_cache_destroy(cache);
- return;
- }
-
- kmem_cache_free(cache, p);
- kmem_cache_free(cache, p);
- kmem_cache_destroy(cache);
-}
-
-static noinline void __init kmem_cache_invalid_free(void)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
- NULL);
- if (!cache) {
- pr_err("Cache allocation failed\n");
- return;
- }
- pr_info("invalid-free of heap object\n");
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- pr_err("Allocation failed\n");
- kmem_cache_destroy(cache);
- return;
- }
-
- /* Trigger invalid free, the object doesn't get freed */
- kmem_cache_free(cache, p + 1);
-
- /*
- * Properly free the object to prevent the "Objects remaining in
- * test_cache on __kmem_cache_shutdown" BUG failure.
- */
- kmem_cache_free(cache, p);
-
- kmem_cache_destroy(cache);
-}
-
-static noinline void __init kasan_memchr(void)
-{
- char *ptr;
- size_t size = 24;
-
- pr_info("out-of-bounds in memchr\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
- return;
-
- memchr(ptr, '1', size + 1);
- kfree(ptr);
-}
-
-static noinline void __init kasan_memcmp(void)
-{
- char *ptr;
- size_t size = 24;
- int arr[9];
-
- pr_info("out-of-bounds in memcmp\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
- return;
-
- memset(arr, 0, sizeof(arr));
- memcmp(ptr, arr, size+1);
- kfree(ptr);
-}
-
-static noinline void __init kasan_strings(void)
-{
- char *ptr;
- size_t size = 24;
-
- pr_info("use-after-free in strchr\n");
- ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
- if (!ptr)
- return;
-
- kfree(ptr);
-
- /*
- * Try to cause only 1 invalid access (less spam in dmesg).
- * For that we need ptr to point to zeroed byte.
- * Skip metadata that could be stored in freed object so ptr
- * will likely point to zeroed byte.
- */
- ptr += 16;
- strchr(ptr, '1');
-
- pr_info("use-after-free in strrchr\n");
- strrchr(ptr, '1');
-
- pr_info("use-after-free in strcmp\n");
- strcmp(ptr, "2");
-
- pr_info("use-after-free in strncmp\n");
- strncmp(ptr, "2", 1);
-
- pr_info("use-after-free in strlen\n");
- strlen(ptr);
-
- pr_info("use-after-free in strnlen\n");
- strnlen(ptr, 1);
-}
-
-static int __init kmalloc_tests_init(void)
-{
- /*
- * Temporarily enable multi-shot mode. Otherwise, we'd only get a
- * report for the first case.
- */
- bool multishot = kasan_save_enable_multi_shot();
-
- kmalloc_oob_right();
- kmalloc_oob_left();
- kmalloc_node_oob_right();
-#ifdef CONFIG_SLUB
- kmalloc_pagealloc_oob_right();
- kmalloc_pagealloc_uaf();
- kmalloc_pagealloc_invalid_free();
-#endif
- kmalloc_large_oob_right();
- kmalloc_oob_krealloc_more();
- kmalloc_oob_krealloc_less();
- kmalloc_oob_16();
- kmalloc_oob_in_memset();
- kmalloc_oob_memset_2();
- kmalloc_oob_memset_4();
- kmalloc_oob_memset_8();
- kmalloc_oob_memset_16();
- kmalloc_uaf();
- kmalloc_uaf_memset();
- kmalloc_uaf2();
- kmem_cache_oob();
- memcg_accounted_kmem_cache();
- kasan_stack_oob();
- kasan_global_oob();
- kasan_alloca_oob_left();
- kasan_alloca_oob_right();
- ksize_unpoisons_memory();
- copy_user_test();
- kmem_cache_double_free();
- kmem_cache_invalid_free();
- kasan_memchr();
- kasan_memcmp();
- kasan_strings();
-
- kasan_restore_multi_shot(multishot);
-
- return -EAGAIN;
-}
-
-module_init(kmalloc_tests_init);
-MODULE_LICENSE("GPL");
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..43d9dfd57ab7 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
* kmod stress test driver
*
* Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or at your option any
- * later version; or, when distributed separately from the Linux kernel or
- * when incorporated into other software packages, subject to the following
- * license:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of copyleft-next (version 0.3.1 or later) as published
- * at http://copyleft-next.org/.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -61,12 +51,11 @@ static int num_test_devs;
/**
* enum kmod_test_case - linker table test case
- *
- * If you add a test case, please be sure to review if you need to se
- * @need_mod_put for your tests case.
- *
* @TEST_KMOD_DRIVER: stress tests request_module()
* @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
+ *
+ * If you add a test case, please be sure to review if you need to set
+ * @need_mod_put for your test case.
*/
enum kmod_test_case {
__TEST_KMOD_INVALID = 0,
@@ -88,7 +77,7 @@ struct test_config {
struct kmod_test_device;
/**
- * kmod_test_device_info - thread info
+ * struct kmod_test_device_info - thread info
*
* @ret_sync: return value if request_module() is used, sync request for
* @TEST_KMOD_DRIVER
@@ -111,7 +100,7 @@ struct kmod_test_device_info {
};
/**
- * kmod_test_device - test device to help test kmod
+ * struct kmod_test_device - test device to help test kmod
*
* @dev_idx: unique ID for test device
* @config: configuration for the test
@@ -204,7 +193,7 @@ static void test_kmod_put_module(struct kmod_test_device_info *info)
case TEST_KMOD_DRIVER:
break;
case TEST_KMOD_FS_TYPE:
- if (info && info->fs_sync && info->fs_sync->owner)
+ if (info->fs_sync && info->fs_sync->owner)
module_put(info->fs_sync->owner);
break;
default:
@@ -286,7 +275,7 @@ static int tally_work_test(struct kmod_test_device_info *info)
* If this ran it means *all* tasks were created fine and we
* are now just collecting results.
*
- * Only propagate errors, do not override with a subsequent sucess case.
+ * Only propagate errors, do not override with a subsequent success case.
*/
static void tally_up_work(struct kmod_test_device *test_dev)
{
@@ -543,7 +532,7 @@ static int trigger_config_run(struct kmod_test_device *test_dev)
* wrong with the setup of the test. If the test setup went fine
* then userspace must just check the result of config->test_result.
* One issue with relying on the return from a call in the kernel
- * is if the kernel returns a possitive value using this trigger
+ * is if the kernel returns a positive value using this trigger
* will not return the value to userspace, it would be lost.
*
* By not relying on capturing the return value of tests we are using
@@ -585,7 +574,7 @@ trigger_config_store(struct device *dev,
* Note: any return > 0 will be treated as success
* and the error value will not be available to userspace.
* Do not rely on trying to send to userspace a test value
- * return value as possitive return errors will be lost.
+ * return value as positive return errors will be lost.
*/
if (WARN_ON(ret > 0))
return -EINVAL;
@@ -745,7 +734,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
break;
case TEST_KMOD_FS_TYPE:
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
break;
@@ -877,20 +866,17 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
int (*test_sync)(struct kmod_test_device *test_dev))
{
int ret;
- unsigned long new;
+ unsigned int val;
unsigned int old_val;
- ret = kstrtoul(buf, 10, &new);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
- if (new > UINT_MAX)
- return -EINVAL;
-
mutex_lock(&test_dev->config_mutex);
old_val = *config;
- *(unsigned int *)config = new;
+ *(unsigned int *)config = val;
ret = test_sync(test_dev);
if (ret) {
@@ -914,18 +900,18 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
unsigned int min,
unsigned int max)
{
+ unsigned int val;
int ret;
- unsigned long new;
- ret = kstrtoul(buf, 10, &new);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
- if (new < min || new > max)
+ if (val < min || val > max)
return -EINVAL;
mutex_lock(&test_dev->config_mutex);
- *config = new;
+ *config = val;
mutex_unlock(&test_dev->config_mutex);
/* Always return full write size even if we didn't consume all */
@@ -936,18 +922,15 @@ static int test_dev_config_update_int(struct kmod_test_device *test_dev,
const char *buf, size_t size,
int *config)
{
+ int val;
int ret;
- long new;
- ret = kstrtol(buf, 10, &new);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
- if (new < INT_MIN || new > INT_MAX)
- return -EINVAL;
-
mutex_lock(&test_dev->config_mutex);
- *config = new;
+ *config = val;
mutex_unlock(&test_dev->config_mutex);
/* Always return full write size even if we didn't consume all */
return size;
@@ -1155,6 +1138,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
if (ret) {
pr_err("could not register misc device: %d\n", ret);
free_test_dev_kmod(test_dev);
+ test_dev = NULL;
goto out;
}
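For context, a hedged sketch of the parsing pattern the hunks above switch to (the helper name is illustrative): kstrtouint() already rejects strings that do not fit in an unsigned int, so the old unsigned long temporary and the explicit "new > UINT_MAX" check become unnecessary.

#include <linux/kernel.h>

/* Illustrative only: parse a base-10 unsigned int from a sysfs-style buffer. */
static int example_parse_uint(const char *buf, unsigned int *out)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);	/* returns -EINVAL/-ERANGE on bad input */
	if (ret)
		return ret;

	*out = val;
	return 0;
}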
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
new file mode 100644
index 000000000000..0648f7154f5c
--- /dev/null
+++ b/lib/test_kprobes.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_kprobes.c - simple sanity test for *probes
+ *
+ * Copyright IBM Corp. 2008
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static u32 rand1, preh_val, posth_val;
+static u32 (*target)(u32 value);
+static u32 (*recursed_target)(u32 value);
+static u32 (*target2)(u32 value);
+static struct kunit *current_test;
+
+static unsigned long (*internal_target)(void);
+static unsigned long (*stacktrace_target)(void);
+static unsigned long (*stacktrace_driver)(void);
+static unsigned long target_return_address[2];
+
+static noinline u32 kprobe_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 kprobe_recursed_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+
+ preh_val = recursed_target(rand1);
+ return 0;
+}
+
+static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ u32 expval = recursed_target(rand1);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, preh_val, expval);
+
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp = {
+ .symbol_name = "kprobe_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler
+};
+
+static void test_kprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
+ target(rand1);
+ unregister_kprobe(&kp);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+}
+
+static noinline u32 kprobe_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static noinline unsigned long kprobe_stacktrace_internal_target(void)
+{
+ if (!target_return_address[0])
+ target_return_address[0] = (unsigned long)__builtin_return_address(0);
+ return target_return_address[0];
+}
+
+static noinline unsigned long kprobe_stacktrace_target(void)
+{
+ if (!target_return_address[1])
+ target_return_address[1] = (unsigned long)__builtin_return_address(0);
+
+ if (internal_target)
+ internal_target();
+
+ return target_return_address[1];
+}
+
+static noinline unsigned long kprobe_stacktrace_driver(void)
+{
+ if (stacktrace_target)
+ stacktrace_target();
+
+ /* This is for preventing inlining the function */
+ return (unsigned long)__builtin_return_address(0);
+}
+
+static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor) + 1;
+ return 0;
+}
+
+static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp2 = {
+ .symbol_name = "kprobe_target2",
+ .pre_handler = kp_pre_handler2,
+ .post_handler = kp_post_handler2
+};
+
+static void test_kprobes(struct kunit *test)
+{
+ struct kprobe *kps[2] = {&kp, &kp2};
+
+ current_test = test;
+
+	/* addr and flags should be cleared before reusing the kprobe. */
+ kp.addr = NULL;
+ kp.flags = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
+ preh_val = 0;
+ posth_val = 0;
+ target(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ preh_val = 0;
+ posth_val = 0;
+ target2(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+ unregister_kprobes(kps, 2);
+}
+
+static struct kprobe kp_missed = {
+ .symbol_name = "kprobe_recursed_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler,
+};
+
+static void test_kprobe_missed(struct kunit *test)
+{
+ current_test = test;
+ preh_val = 0;
+ posth_val = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
+
+ recursed_target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ unregister_kprobe(&kp_missed);
+}
+
+#ifdef CONFIG_KRETPROBES
+static u32 krph_val;
+
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp = {
+ .handler = return_handler,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static void test_kretprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
+ target(rand1);
+ unregister_kretprobe(&rp);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+}
+
+static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp2 = {
+ .handler = return_handler2,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target2"
+};
+
+static void test_kretprobes(struct kunit *test)
+{
+ struct kretprobe *rps[2] = {&rp, &rp2};
+
+ current_test = test;
+	/* addr and flags should be cleared before reusing the kprobe. */
+ rp.kp.addr = NULL;
+ rp.kp.flags = 0;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
+
+ krph_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+
+ krph_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+ unregister_kretprobes(rps, 2);
+}
+
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+#define STACK_BUF_SIZE 16
+static unsigned long stack_buf[STACK_BUF_SIZE];
+
+static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);
+
+ /*
+	 * Test the stacktrace from inside the kretprobe handler; this will
+	 * involve the kretprobe trampoline, but the trace must still include
+	 * the correct return address of the target function.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (stack_buf[i] == target_return_address[1])
+ break;
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /*
+ * Test stacktrace from pt_regs at the return address. Thus the stack
+ * trace must start from the target return address.
+ */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp3 = {
+ .handler = stacktrace_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_target"
+};
+
+static void test_stacktrace_on_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ /*
+	 * Run stacktrace_driver() to record the correct return address in
+	 * stacktrace_target(), and ensure the stacktrace_driver() call was
+	 * not inlined by checking that the return address of
+	 * stacktrace_driver() differs from the return address of this function.
+ */
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobe(&rp3);
+}
+
+static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
+
+ /*
+	 * Test the stacktrace from inside the kretprobe handler for the nested
+	 * case. The unwinder finds the kretprobe_trampoline address in place of
+	 * the return address, and kretprobes must resolve it back to the real one.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret - 1; i++) {
+ if (stack_buf[i] == target_return_address[0]) {
+ KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
+ break;
+ }
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /* Ditto for the regs version. */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp4 = {
+ .handler = stacktrace_internal_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_internal_target"
+};
+
+static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+ struct kretprobe *rps[2] = {&rp3, &rp4};
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobes(rps, 2);
+}
+#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
+
+#endif /* CONFIG_KRETPROBES */
+
+static int kprobes_test_init(struct kunit *test)
+{
+ target = kprobe_target;
+ target2 = kprobe_target2;
+ recursed_target = kprobe_recursed_target;
+ stacktrace_target = kprobe_stacktrace_target;
+ internal_target = kprobe_stacktrace_internal_target;
+ stacktrace_driver = kprobe_stacktrace_driver;
+ rand1 = get_random_u32_above(div_factor);
+ return 0;
+}
+
+static struct kunit_case kprobes_testcases[] = {
+ KUNIT_CASE(test_kprobe),
+ KUNIT_CASE(test_kprobes),
+ KUNIT_CASE(test_kprobe_missed),
+#ifdef CONFIG_KRETPROBES
+ KUNIT_CASE(test_kretprobe),
+ KUNIT_CASE(test_kretprobes),
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ KUNIT_CASE(test_stacktrace_on_kretprobe),
+ KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
+#endif
+#endif
+ {}
+};
+
+static struct kunit_suite kprobes_test_suite = {
+ .name = "kprobes_test",
+ .init = kprobes_test_init,
+ .test_cases = kprobes_testcases,
+};
+
+kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c
new file mode 100644
index 000000000000..c18f9c0f1f25
--- /dev/null
+++ b/lib/test_linear_ranges.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the linear_ranges helper.
+ *
+ * Copyright (C) 2020, ROHM Semiconductors.
+ * Author: Matti Vaittinen <matti.vaittien@fi.rohmeurope.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/linear_range.h>
+
+/* First things first. I deeply dislike unit tests. I have seen all the hell
+ * breaking loose when people who think unit tests are "the silver bullet"
+ * for killing bugs get to decide how a company should implement its testing strategy...
+ *
+ * Believe me, it may get _really_ ridiculous. It is tempting to think that
+ * walking through all the possible execution branches will nail down 100% of
+ * bugs. This may lead to ideas about demanding a certain % of "test
+ * coverage" - measured as line coverage. And that is one of the worst things
+ * you can do.
+ *
+ * Ask people to provide line coverage and they do. I've seen clever tools
+ * which generate test cases for the existing functions - and by default
+ * these tools expect the code to be correct and just generate checks that
+ * pass when run against the current code base. Run such a generator and you get
+ * tests that do not check the code is correct but merely verify that nothing
+ * changes. The problem is that testing working code is pointless. And if it is
+ * not working, your test must not assume it is working. You won't catch any
+ * bugs with such tests. What you can do is generate a huge number of tests,
+ * especially if you are asked to provide 100% line coverage x_x. So what do
+ * these tests - which are not finding any bugs now - do?
+ *
+ * They add inertia to all future development. I think it was Terry Pratchett
+ * who wrote of someone having the same impact as thick syrup has on a chronometer.
+ * An excessive number of unit tests has that effect on development. If you do
+ * actually find _any_ bug in the code in such an environment and try to fix it...
+ * ...chances are you also need to fix the test cases. On a sunny day you fix one
+ * test. But I've done refactoring which resulted in 500+ broken tests (which had
+ * really zero value other than proving to managers that we do "quality")...
+ *
+ * That being said - there are situations where unit tests can be handy. If you
+ * have algorithms which take some input and should produce output - then you
+ * can implement a few carefully selected, simple test cases which exercise this.
+ * I've previously used this for example for netlink and device-tree data
+ * parsing functions. Feed some example data to the functions and verify the
+ * output is as expected. I am not covering all the cases, but I will see that
+ * the main logic is working.
+ *
+ * Here we also do some minor testing. I don't want to go through all branches
+ * or test more or less obvious things - but I want to see that the main logic
+ * is working. And I definitely don't want to add 500+ test cases that break
+ * when some simple fix is done x_x. So - let's only add a few, well-selected
+ * tests which ensure as much of the logic as possible is good.
+ */
+
+/*
+ * Test Range 1:
+ * selectors: 2 3 4 5 6
+ * values (5): 10 20 30 40 50
+ *
+ * Test Range 2:
+ * selectors: 7 8 9 10
+ * values (4): 100 150 200 250
+ */
+
+#define RANGE1_MIN 10
+#define RANGE1_MIN_SEL 2
+#define RANGE1_STEP 10
+
+/* 2, 3, 4, 5, 6 */
+static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1,
+ RANGE1_MIN_SEL + 2,
+ RANGE1_MIN_SEL + 3,
+ RANGE1_MIN_SEL + 4 };
+/* 10, 20, 30, 40, 50 */
+static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN +
+ RANGE1_STEP,
+ RANGE1_MIN + RANGE1_STEP * 2,
+ RANGE1_MIN + RANGE1_STEP * 3,
+ RANGE1_MIN + RANGE1_STEP * 4 };
+
+#define RANGE2_MIN 100
+#define RANGE2_MIN_SEL 7
+#define RANGE2_STEP 50
+
+/* 7, 8, 9, 10 */
+static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1,
+ RANGE2_MIN_SEL + 2,
+ RANGE2_MIN_SEL + 3 };
+/* 100, 150, 200, 250 */
+static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN +
+ RANGE2_STEP,
+ RANGE2_MIN + RANGE2_STEP * 2,
+ RANGE2_MIN + RANGE2_STEP * 3 };
+
+#define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals))
+#define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals))
+#define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS)
+
+#define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1)
+#define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1])
+
+#define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1)
+#define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1])
+
+#define SMALLEST_SEL RANGE1_MIN_SEL
+#define SMALLEST_VAL RANGE1_MIN
+
+static struct linear_range testr[] = {
+ LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP),
+ LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP),
+};
+
+static void range_test_get_value(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel, val;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ sel = range1_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range1_vals[i]);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ sel = range2_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range2_vals[i]);
+ }
+ ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val);
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void range_test_get_selector_high(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_high(&testr[0], range1_vals[i],
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1,
+ &sel, &found);
+ KUNIT_EXPECT_LE(test, ret, 0);
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1,
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_FALSE(test, found);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[0]);
+}
+
+static void range_test_get_value_amount(struct kunit *test)
+{
+ int ret;
+
+ ret = linear_range_values_in_range_array(&testr[0], 2);
+ KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret);
+}
+
+static void range_test_get_selector_low(struct kunit *test)
+{
+ int i, ret;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range1_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ /*
+	 * Seek a value greater than the range max => get_selector_*_low should
+	 * succeed (return 0) but set found to false as the value is not in range.
+ */
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[RANGE2_NUM_VALS - 1] + 1,
+ &sel, &found);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]);
+ KUNIT_EXPECT_FALSE(test, found);
+}
+
+static struct kunit_case range_test_cases[] = {
+ KUNIT_CASE(range_test_get_value_amount),
+ KUNIT_CASE(range_test_get_selector_high),
+ KUNIT_CASE(range_test_get_selector_low),
+ KUNIT_CASE(range_test_get_value),
+ {},
+};
+
+static struct kunit_suite range_test_module = {
+ .name = "linear-ranges-test",
+ .test_cases = range_test_cases,
+};
+
+kunit_test_suites(&range_test_module);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 1f017d3b610e..cc5f335f29b5 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "list_sort_test: " fmt
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
@@ -23,70 +23,55 @@ struct debug_el {
struct list_head list;
unsigned int poison2;
int value;
- unsigned serial;
+ unsigned int serial;
};
-/* Array, containing pointers to all elements in the test list */
-static struct debug_el **elts __initdata;
-
-static int __init check(struct debug_el *ela, struct debug_el *elb)
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
{
- if (ela->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", ela->serial);
- return -EINVAL;
- }
- if (elb->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", elb->serial);
- return -EINVAL;
- }
- if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- pr_err("error: phantom element\n");
- return -EINVAL;
- }
- if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
- return -EINVAL;
- }
- if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
- return -EINVAL;
- }
- return 0;
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
}
-static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct debug_el *ela, *elb;
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
- check(ela, elb);
+ check(priv, ela, elb);
return ela->value - elb->value;
}
-static int __init list_sort_test(void)
+static void list_sort_test(struct kunit *test)
{
- int i, count = 1, err = -ENOMEM;
- struct debug_el *el;
+ int i, count = 1;
+ struct debug_el *el, **elts;
struct list_head *cur;
LIST_HEAD(head);
- pr_debug("start testing list_sort()\n");
-
- elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
- if (!elts)
- return err;
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
for (i = 0; i < TEST_LIST_LEN; i++) {
- el = kmalloc(sizeof(*el), GFP_KERNEL);
- if (!el)
- goto exit;
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
- el->value = prandom_u32() % (TEST_LIST_LEN / 3);
+ el->value = get_random_u32_below(TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
@@ -94,55 +79,44 @@ static int __init list_sort_test(void)
list_add_tail(&el->list, &head);
}
- list_sort(NULL, &head, cmp);
+ list_sort(test, &head, cmp);
- err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
- if (cur->next->prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
- cmp_result = cmp(NULL, cur, cur->next);
- if (cmp_result > 0) {
- pr_err("error: list is not sorted\n");
- goto exit;
- }
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
- if (cmp_result == 0 && el->serial >= el1->serial) {
- pr_err("error: order of equivalent elements not "
- "preserved\n");
- goto exit;
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
}
- if (check(el, el1)) {
- pr_err("error: element check failed\n");
- goto exit;
- }
+ check(test, el, el1);
count++;
}
- if (head.prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
- if (count != TEST_LIST_LEN) {
- pr_err("error: bad list length %d", count);
- goto exit;
- }
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
- err = 0;
-exit:
- for (i = 0; i < TEST_LIST_LEN; i++)
- kfree(elts[i]);
- kfree(elts);
- return err;
-}
-module_init(list_sort_test);
MODULE_LICENSE("GPL");
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
new file mode 100644
index 000000000000..c3fd87d6c2dd
--- /dev/null
+++ b/lib/test_lockup.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module to generate lockups
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static unsigned int time_secs;
+module_param(time_secs, uint, 0600);
+MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");
+
+static unsigned int time_nsecs;
+module_param(time_nsecs, uint, 0600);
+MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");
+
+static unsigned int cooldown_secs;
+module_param(cooldown_secs, uint, 0600);
+MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");
+
+static unsigned int cooldown_nsecs;
+module_param(cooldown_nsecs, uint, 0600);
+MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0600);
+MODULE_PARM_DESC(iterations, "lockup iterations, default 1");
+
+static bool all_cpus;
+module_param(all_cpus, bool, 0400);
+MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");
+
+static int wait_state;
+static char *state = "R";
+module_param(state, charp, 0400);
+MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");
+
+static bool use_hrtimer;
+module_param(use_hrtimer, bool, 0400);
+MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");
+
+static bool iowait;
+module_param(iowait, bool, 0400);
+MODULE_PARM_DESC(iowait, "account sleep time as iowait");
+
+static bool lock_read;
+module_param(lock_read, bool, 0400);
+MODULE_PARM_DESC(lock_read, "lock read-write locks for read");
+
+static bool lock_single;
+module_param(lock_single, bool, 0400);
+MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");
+
+static bool reacquire_locks;
+module_param(reacquire_locks, bool, 0400);
+MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
+
+static bool touch_softlockup;
+module_param(touch_softlockup, bool, 0600);
+MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");
+
+static bool touch_hardlockup;
+module_param(touch_hardlockup, bool, 0600);
+MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");
+
+static bool call_cond_resched;
+module_param(call_cond_resched, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
+
+static bool measure_lock_wait;
+module_param(measure_lock_wait, bool, 0400);
+MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
+
+static unsigned long lock_wait_threshold = ULONG_MAX;
+module_param(lock_wait_threshold, ulong, 0400);
+MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");
+
+static bool test_disable_irq;
+module_param_named(disable_irq, test_disable_irq, bool, 0400);
+MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");
+
+static bool disable_softirq;
+module_param(disable_softirq, bool, 0400);
+MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");
+
+static bool disable_preempt;
+module_param(disable_preempt, bool, 0400);
+MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");
+
+static bool lock_rcu;
+module_param(lock_rcu, bool, 0400);
+MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
+
+static bool lock_mmap_sem;
+module_param(lock_mmap_sem, bool, 0400);
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");
+
+static unsigned long lock_rwsem_ptr;
+module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");
+
+static unsigned long lock_mutex_ptr;
+module_param_unsafe(lock_mutex_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");
+
+static unsigned long lock_spinlock_ptr;
+module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");
+
+static unsigned long lock_rwlock_ptr;
+module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");
+
+static unsigned int alloc_pages_nr;
+module_param_unsafe(alloc_pages_nr, uint, 0600);
+MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");
+
+static unsigned int alloc_pages_order;
+module_param(alloc_pages_order, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");
+
+static gfp_t alloc_pages_gfp = GFP_KERNEL;
+module_param_unsafe(alloc_pages_gfp, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");
+
+static bool alloc_pages_atomic;
+module_param(alloc_pages_atomic, bool, 0400);
+MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");
+
+static bool reallocate_pages;
+module_param(reallocate_pages, bool, 0400);
+MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
+
+struct file *test_file;
+static struct inode *test_inode;
+static char test_file_path[256];
+module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
+MODULE_PARM_DESC(file_path, "file path to test");
+
+static bool test_lock_inode;
+module_param_named(lock_inode, test_lock_inode, bool, 0400);
+MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");
+
+static bool test_lock_mapping;
+module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
+MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");
+
+static bool test_lock_sb_umount;
+module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
+MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
+
+static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
+
+static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+
+static struct task_struct *main_task;
+static int master_cpu;
+
+static void test_lock(bool master, bool verbose)
+{
+ u64 wait_start;
+
+ if (measure_lock_wait)
+ wait_start = local_clock();
+
+ if (lock_mutex_ptr && master) {
+ if (verbose)
+ pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
+ mutex_lock((struct mutex *)lock_mutex_ptr);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (verbose)
+ pr_notice("lock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ if (lock_read)
+ down_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ down_write((struct rw_semaphore *)lock_rwsem_ptr);
+ }
+
+ if (lock_mmap_sem && master) {
+ if (verbose)
+ pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
+ if (lock_read)
+ mmap_read_lock(main_task->mm);
+ else
+ mmap_write_lock(main_task->mm);
+ }
+
+ if (test_disable_irq)
+ local_irq_disable();
+
+ if (disable_softirq)
+ local_bh_disable();
+
+ if (disable_preempt)
+ preempt_disable();
+
+ if (lock_rcu)
+ rcu_read_lock();
+
+ if (lock_spinlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ spin_lock((spinlock_t *)lock_spinlock_ptr);
+ }
+
+ if (lock_rwlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ if (lock_read)
+ read_lock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_lock((rwlock_t *)lock_rwlock_ptr);
+ }
+
+ if (measure_lock_wait) {
+ s64 cur_wait = local_clock() - wait_start;
+ s64 max_wait = atomic64_read(&max_lock_wait);
+
+ do {
+ if (cur_wait < max_wait)
+ break;
+ max_wait = atomic64_cmpxchg(&max_lock_wait,
+ max_wait, cur_wait);
+ } while (max_wait != cur_wait);
+
+ if (cur_wait > lock_wait_threshold)
+ pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
+ }
+}
+
+static void test_unlock(bool master, bool verbose)
+{
+ if (lock_rwlock_ptr && master) {
+ if (lock_read)
+ read_unlock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_unlock((rwlock_t *)lock_rwlock_ptr);
+ if (verbose)
+ pr_notice("unlock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ }
+
+ if (lock_spinlock_ptr && master) {
+ spin_unlock((spinlock_t *)lock_spinlock_ptr);
+ if (verbose)
+ pr_notice("unlock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ }
+
+ if (lock_rcu)
+ rcu_read_unlock();
+
+ if (disable_preempt)
+ preempt_enable();
+
+ if (disable_softirq)
+ local_bh_enable();
+
+ if (test_disable_irq)
+ local_irq_enable();
+
+ if (lock_mmap_sem && master) {
+ if (lock_read)
+ mmap_read_unlock(main_task->mm);
+ else
+ mmap_write_unlock(main_task->mm);
+ if (verbose)
+ pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (lock_read)
+ up_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ up_write((struct rw_semaphore *)lock_rwsem_ptr);
+ if (verbose)
+ pr_notice("unlock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ }
+
+ if (lock_mutex_ptr && master) {
+ mutex_unlock((struct mutex *)lock_mutex_ptr);
+ if (verbose)
+ pr_notice("unlock mutex %ps\n",
+ (void *)lock_mutex_ptr);
+ }
+}
+
+static void test_alloc_pages(struct list_head *pages)
+{
+ struct page *page;
+ unsigned int i;
+
+ for (i = 0; i < alloc_pages_nr; i++) {
+ page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
+ if (!page) {
+ atomic_inc(&alloc_pages_failed);
+ break;
+ }
+ list_add(&page->lru, pages);
+ }
+}
+
+static void test_free_pages(struct list_head *pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, pages, lru)
+ __free_pages(page, alloc_pages_order);
+ INIT_LIST_HEAD(pages);
+}
+
+static void test_wait(unsigned int secs, unsigned int nsecs)
+{
+ if (wait_state == TASK_RUNNING) {
+ if (secs)
+ mdelay(secs * MSEC_PER_SEC);
+ if (nsecs)
+ ndelay(nsecs);
+ return;
+ }
+
+ __set_current_state(wait_state);
+ if (use_hrtimer) {
+ ktime_t time;
+
+ time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
+ schedule_hrtimeout(&time, HRTIMER_MODE_REL);
+ } else {
+ schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
+ }
+}
+
+static void test_lockup(bool master)
+{
+ u64 lockup_start = local_clock();
+ unsigned int iter = 0;
+ LIST_HEAD(pages);
+
+ pr_notice("Start on CPU%d\n", raw_smp_processor_id());
+
+ test_lock(master, true);
+
+ test_alloc_pages(&pages);
+
+ while (iter++ < iterations && !signal_pending(main_task)) {
+
+ if (iowait)
+ current->in_iowait = 1;
+
+ test_wait(time_secs, time_nsecs);
+
+ if (iowait)
+ current->in_iowait = 0;
+
+ if (reallocate_pages)
+ test_free_pages(&pages);
+
+ if (reacquire_locks)
+ test_unlock(master, false);
+
+ if (touch_softlockup)
+ touch_softlockup_watchdog();
+
+ if (touch_hardlockup)
+ touch_nmi_watchdog();
+
+ if (call_cond_resched)
+ cond_resched();
+
+ test_wait(cooldown_secs, cooldown_nsecs);
+
+ if (reacquire_locks)
+ test_lock(master, false);
+
+ if (reallocate_pages)
+ test_alloc_pages(&pages);
+ }
+
+ pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
+ local_clock() - lockup_start);
+
+ test_free_pages(&pages);
+
+ test_unlock(master, true);
+}
+
+static DEFINE_PER_CPU(struct work_struct, test_works);
+
+static void test_work_fn(struct work_struct *work)
+{
+ test_lockup(!lock_single ||
+ work == per_cpu_ptr(&test_works, master_cpu));
+}
+
+static bool test_kernel_ptr(unsigned long addr, int size)
+{
+ void *ptr = (void *)addr;
+ char buf;
+
+ if (!addr)
+ return false;
+
+ /* should be at least readable kernel address */
+ if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) &&
+ (access_ok((void __user *)ptr, 1) ||
+ access_ok((void __user *)ptr + size - 1, 1))) {
+ pr_err("user space ptr invalid in kernel: %#lx\n", addr);
+ return true;
+ }
+
+ if (get_kernel_nofault(buf, ptr) ||
+ get_kernel_nofault(buf, ptr + size - 1)) {
+ pr_err("invalid kernel ptr: %#lx\n", addr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool __maybe_unused test_magic(unsigned long addr, int offset,
+ unsigned int expected)
+{
+ void *ptr = (void *)addr + offset;
+ unsigned int magic = 0;
+
+ if (!addr)
+ return false;
+
+ if (get_kernel_nofault(magic, ptr) || magic != expected) {
+ pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
+ addr, offset, magic, expected);
+ return true;
+ }
+
+ return false;
+}
+
+static int __init test_lockup_init(void)
+{
+ u64 test_start = local_clock();
+
+ main_task = current;
+
+ switch (state[0]) {
+ case 'S':
+ wait_state = TASK_INTERRUPTIBLE;
+ break;
+ case 'D':
+ wait_state = TASK_UNINTERRUPTIBLE;
+ break;
+ case 'K':
+ wait_state = TASK_KILLABLE;
+ break;
+ case 'R':
+ wait_state = TASK_RUNNING;
+ break;
+ default:
+ pr_err("unknown state=%s\n", state);
+ return -EINVAL;
+ }
+
+ if (alloc_pages_atomic)
+ alloc_pages_gfp = GFP_ATOMIC;
+
+ if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
+ test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
+ test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
+ test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
+ return -EINVAL;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#ifdef CONFIG_PREEMPT_RT
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, lock.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#else
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, magic),
+ RWLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#endif
+#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
+ (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
+ (test_disable_irq || disable_softirq || disable_preempt ||
+ lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
+ pr_err("refuse to sleep in atomic context\n");
+ return -EINVAL;
+ }
+
+ if (lock_mmap_sem && !main_task->mm) {
+ pr_err("no mm to lock mmap_lock\n");
+ return -EINVAL;
+ }
+
+ if (test_file_path[0]) {
+ test_file = filp_open(test_file_path, O_RDONLY, 0);
+ if (IS_ERR(test_file)) {
+ pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
+ return PTR_ERR(test_file);
+ }
+ test_inode = file_inode(test_file);
+ } else if (test_lock_inode ||
+ test_lock_mapping ||
+ test_lock_sb_umount) {
+ pr_err("no file to lock\n");
+ return -EINVAL;
+ }
+
+ if (test_lock_inode && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;
+
+ if (test_lock_mapping && test_file && test_file->f_mapping)
+ lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;
+
+ if (test_lock_sb_umount && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
+
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ main_task->pid, time_secs, time_nsecs,
+ cooldown_secs, cooldown_nsecs, iterations, state,
+ all_cpus ? "all_cpus " : "",
+ iowait ? "iowait " : "",
+ test_disable_irq ? "disable_irq " : "",
+ disable_softirq ? "disable_softirq " : "",
+ disable_preempt ? "disable_preempt " : "",
+ lock_rcu ? "lock_rcu " : "",
+ lock_read ? "lock_read " : "",
+ touch_softlockup ? "touch_softlockup " : "",
+ touch_hardlockup ? "touch_hardlockup " : "",
+ call_cond_resched ? "call_cond_resched " : "",
+ reacquire_locks ? "reacquire_locks " : "");
+
+ if (alloc_pages_nr)
+ pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
+ alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
+ reallocate_pages ? "reallocate_pages " : "");
+
+ if (all_cpus) {
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ preempt_disable();
+ master_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&test_works, cpu));
+ }
+ preempt_enable();
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&test_works, cpu));
+
+ cpus_read_unlock();
+ } else {
+ test_lockup(true);
+ }
+
+ if (measure_lock_wait)
+ pr_notice("Maximum lock wait: %lld ns\n",
+ atomic64_read(&max_lock_wait));
+
+ if (alloc_pages_nr)
+ pr_notice("Page allocation failed %u times\n",
+ atomic_read(&alloc_pages_failed));
+
+ pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
+
+ if (test_file)
+ fput(test_file);
+
+ if (signal_pending(main_task))
+ return -EINTR;
+
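+ /* The test runs entirely in init; fail it so the module never stays loaded. */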
+ return -EAGAIN;
+}
+module_init(test_lockup_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
+MODULE_DESCRIPTION("Test module to generate lockups");
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
new file mode 100644
index 000000000000..29185ac5c727
--- /dev/null
+++ b/lib/test_maple_tree.c
@@ -0,0 +1,3905 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_maple_tree.c: Test the maple tree API
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ *
+ * Tests that only require the interface of the tree belong here.
+ */
+
+#include <linux/maple_tree.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+
+#define MTREE_ALLOC_MAX 0x2000000000000UL
+#define CONFIG_MAPLE_SEARCH
+#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
+
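+/*
+ * Without CONFIG_DEBUG_MAPLE_TREE the dump and validate helpers become
+ * no-ops and MT_BUG_ON() only counts runs and logs failures.
+ */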
+#ifndef CONFIG_DEBUG_MAPLE_TREE
+#define mt_dump(mt, fmt) do {} while (0)
+#define mt_validate(mt) do {} while (0)
+#define mt_cache_shrink() do {} while (0)
+#define mas_dump(mas) do {} while (0)
+#define mas_wr_dump(mas) do {} while (0)
+atomic_t maple_tree_tests_run;
+atomic_t maple_tree_tests_passed;
+#undef MT_BUG_ON
+
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#endif
+
+/* #define BENCH_SLOT_STORE */
+/* #define BENCH_NODE_STORE */
+/* #define BENCH_AWALK */
+/* #define BENCH_WALK */
+/* #define BENCH_LOAD */
+/* #define BENCH_MT_FOR_EACH */
+/* #define BENCH_FORK */
+/* #define BENCH_MAS_FOR_EACH */
+/* #define BENCH_MAS_PREV */
+
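+/*
+ * In-kernel builds stub out the userspace test hooks; the userspace build
+ * stubs out cond_resched() instead.
+ */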
+#ifdef __KERNEL__
+#define mt_set_non_kernel(x) do {} while (0)
+#define mt_zero_nr_tallocated(x) do {} while (0)
+#else
+#define cond_resched() do {} while (0)
+#endif
+
+#define mas_is_none(x) ((x)->status == ma_none)
+#define mas_is_overflow(x) ((x)->status == ma_overflow)
+#define mas_is_underflow(x) ((x)->status == ma_underflow)
+
+static int __init mtree_insert_index(struct maple_tree *mt,
+ unsigned long index, gfp_t gfp)
+{
+ return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index)
+{
+ MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
+ MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
+}
+
+static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ return mtree_insert(mt, index, ptr, GFP_KERNEL);
+}
+
+static int __init mtree_test_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
+{
+ return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static int __init mtree_test_store(struct maple_tree *mt, unsigned long start,
+ void *ptr)
+{
+ return mtree_test_store_range(mt, start, start, ptr);
+}
+
+static int __init mtree_test_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
+{
+ return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_load(mt, index);
+}
+
+static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_erase(mt, index);
+}
+
+#if defined(CONFIG_64BIT)
+static noinline void __init check_mtree_alloc_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+#endif
+
+static noinline void __init check_load(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ void *ret = mtree_test_load(mt, index);
+
+ if (ret != ptr)
+ pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
+ MT_BUG_ON(mt, ret != ptr);
+}
+
+static noinline void __init check_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_store_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void __init check_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_insert_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void __init check_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != 0);
+}
+
+static noinline void __init check_dup_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != -EEXIST);
+}
+
+
+static noinline void __init check_index_load(struct maple_tree *mt,
+ unsigned long index)
+{
+ return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
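+/* Return 1 if the node still has a parent or any populated slot. */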
+static inline __init int not_empty(struct maple_node *node)
+{
+ int i;
+
+ if (node->parent)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(node->slot); i++)
+ if (node->slot[i])
+ return 1;
+
+ return 0;
+}
+
+
+static noinline void __init check_rev_seq(struct maple_tree *mt,
+ unsigned long max, bool verbose)
+{
+ unsigned long i = max, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ while (i) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = i; j <= max; j++)
+ check_index_load(mt, j);
+
+ check_load(mt, i - 1, NULL);
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_clear_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ i--;
+ }
+ check_load(mt, max + 1, NULL);
+
+#ifndef __KERNEL__
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt, mt_dump_dec);
+ pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
+ __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+#endif
+}
+
+static noinline void __init check_seq(struct maple_tree *mt, unsigned long max,
+ bool verbose)
+{
+ unsigned long i, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ for (i = 0; i <= max; i++) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = 0; j <= i; j++)
+ check_index_load(mt, j);
+
+ if (i)
+ MT_BUG_ON(mt, !mt_height(mt));
+ check_load(mt, i + 1, NULL);
+ }
+
+#ifndef __KERNEL__
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt, mt_dump_dec);
+ pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+ max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+#endif
+}
+
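+/*
+ * Insert indices from roughly 4 billion down to 4096, halving each time, and
+ * check that every inserted index and its empty neighbours load as expected.
+ */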
+static noinline void __init check_lb_not_empty(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge = 4000UL * 1000 * 1000;
+
+
+ i = huge;
+ while (i > 4096) {
+ check_insert(mt, i, (void *) i);
+ for (j = huge; j >= i; j /= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i /= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_lower_bound_split(struct maple_tree *mt)
+{
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_lb_not_empty(mt);
+}
+
+static noinline void __init check_upper_bound_split(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ if (MAPLE_32BIT)
+ huge = 2147483647UL;
+ else
+ huge = 4000UL * 1000 * 1000;
+
+ i = 4096;
+ while (i < huge) {
+ check_insert(mt, i, (void *) i);
+ for (j = i; j >= huge; j *= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i *= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_mid_split(struct maple_tree *mt)
+{
+ unsigned long huge = 8000UL * 1000 * 1000;
+
+ check_insert(mt, huge, (void *) huge);
+ check_insert(mt, 0, xa_mk_value(0));
+ check_lb_not_empty(mt);
+}
+
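+/*
+ * Store ranges i * 10 to i * 10 + 5 and check that mas_find_rev() returns the
+ * expected entries when searching backwards from various indices.
+ */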
+static noinline void __init check_rev_find(struct maple_tree *mt)
+{
+ int i, nr_entries = 200;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ rcu_read_lock();
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 999);
+ val = mas_find_rev(&mas, 997);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(99));
+
+ mas_set(&mas, 20);
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(2));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ rcu_read_unlock();
+}
+
+static noinline void __init check_find(struct maple_tree *mt)
+{
+ unsigned long val = 0;
+ unsigned long count;
+ unsigned long max;
+ unsigned long top;
+ unsigned long last = 0, index = 0;
+ void *entry, *entry2;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ /* Insert 0. */
+ MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL));
+
+#if defined(CONFIG_64BIT)
+ top = 4398046511104UL;
+#else
+ top = ULONG_MAX;
+#endif
+
+ if (MAPLE_32BIT)
+ count = 15;
+ else
+ count = 20;
+
+ for (int i = 0; i <= count; i++) {
+ if (val != 64)
+ MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL));
+ else
+ MT_BUG_ON(mt, mtree_insert(mt, val,
+ XA_ZERO_ENTRY, GFP_KERNEL));
+
+ val <<= 2;
+ }
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ while ((entry = mas_find(&mas, 268435456)) != NULL) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ /* Test mas_pause */
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+
+ val = 0;
+ max = 0;
+ index = 0;
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+ mt_for_each(mt, entry, index, ULONG_MAX) {
+ if (val == top)
+ MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+
+ /* Workaround for 32bit */
+ if ((val << 2) < val)
+ val = ULONG_MAX;
+ else
+ val <<= 2;
+
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ max++;
+ MT_BUG_ON(mt, max > 25);
+ }
+ mtree_erase_index(mt, ULONG_MAX);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 512);
+ MT_BUG_ON(mt, xa_mk_value(256) != entry);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 20);
+ MT_BUG_ON(mt, entry != NULL);
+
+
+ /* Range check.. */
+ /* Insert ULONG_MAX */
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+
+ val = 0;
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val == 64)
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ else if (val == top)
+ MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+
+ /* Workaround for 32bit */
+ if ((val << 2) < val)
+ val = ULONG_MAX;
+ else
+ val <<= 2;
+
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ mas_set(&mas, 1048576);
+ mas_lock(&mas);
+ entry = mas_find(&mas, 1048576);
+ mas_unlock(&mas);
+ MT_BUG_ON(mas.tree, entry == NULL);
+
+ /*
+ * Find last value.
+ * 1. get the expected value, leveraging the existence of an end entry
+ * 2. delete end entry
+ * 3. find the last value by searching for ULONG_MAX and then using
+ * prev
+ */
+ /* First, get the expected result. */
+ mas_lock(&mas);
+ mas_reset(&mas);
+ mas.index = ULONG_MAX; /* start at max.. */
+ entry = mas_find(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0);
+ index = mas.index;
+ last = mas.last;
+
+ /* Erase the last entry. */
+ mas_reset(&mas);
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ mas_erase(&mas);
+
+ /* Get the previous value from MAS_START */
+ mas_reset(&mas);
+ entry2 = mas_prev(&mas, 0);
+
+ /* Check results. */
+ MT_BUG_ON(mt, entry != entry2);
+ MT_BUG_ON(mt, index != mas.index);
+ MT_BUG_ON(mt, last != mas.last);
+
+
+ mas.status = ma_none;
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ entry2 = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != entry2);
+
+ mas_set(&mas, 0);
+ MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL);
+
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_find_2(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ void *entry;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX)
+ MT_BUG_ON(mt, true);
+ rcu_read_unlock();
+
+ for (i = 0; i < 256; i++) {
+ mtree_insert_index(mt, i, GFP_KERNEL);
+ j = 0;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != i + 1);
+ }
+
+ for (i = 0; i < 256; i++) {
+ mtree_erase_index(mt, i);
+ j = i + 1;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != 256);
+ }
+
+ /*MT_BUG_ON(mt, !mtree_empty(mt)); */
+}
+
+
+#if defined(CONFIG_64BIT)
+static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps | awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ static const unsigned long range[] = {
+ /* Inclusive , Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+
+ static const unsigned long holes[] = {
+ /*
+ * Note: start of hole is INCLUSIVE
+ * end of hole is EXCLUSIVE
+ * (opposite of the above table.)
+ * Start of hole, end of hole, size of hole (+1)
+ */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+ /*
+ * req_range consists of 5 values per test case:
+ * 1. min index
+ * 2. max index
+ * 3. size
+ * 4. address that should be returned
+ * 5. expected return value
+ */
+ static const unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+ 0x7fff5878d << 12, /* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x565234AF0 << 12, /* Max */
+ 0x3000, /* Size */
+ 0x565234AEE << 12, /* max - 3. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ -1, /* Max */
+ 0x1000, /* Size */
+ 562949953421311 << 12,/* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7F36D5109 << 12, /* Max */
+ 0x4000, /* Size */
+ 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
+ 0, /* Return value success. */
+
+ /* Ascend test. */
+ 0x0,
+ 34148798628 << 12,
+ 19 << 12,
+ 34148797418 << 12,
+ 0x0,
+
+ /* Too big test. */
+ 0x0,
+ 18446744073709551615UL,
+ 562915594369134UL << 12,
+ 0x0,
+ -EBUSY,
+
+ /* Single space test. */
+ 34148798725 << 12,
+ 34148798725 << 12,
+ 1 << 12,
+ 34148798725 << 12,
+ 0,
+ };
+
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+#define DEBUG_REV_RANGE 0
+ for (i = 0; i < range_count; i += 2) {
+ /* Inclusive, Inclusive (with the -1) */
+
+#if DEBUG_REV_RANGE
+ pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+
+ mas_lock(&mas);
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+#if DEBUG_REV_RANGE
+ pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
+ min, holes[i+1]>>12, holes[i+2]>>12,
+ holes[i] >> 12);
+#endif
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, min,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+#if DEBUG_REV_RANGE
+ pr_debug("Found %lu %lu\n", mas.index, mas.last);
+ pr_debug("gap %lu %lu\n", (holes[i] >> 12),
+ (holes[i+1] >> 12));
+#endif
+ MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
+ MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
+ min = holes[i+1] >> 12;
+ mas_reset(&mas);
+ }
+
+ mas_unlock(&mas);
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_REV_RANGE
+ pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
+ i, req_range[i] >> 12,
+ (req_range[i + 1] >> 12),
+ req_range[i+2] >> 12,
+ req_range[i+3] >> 12);
+#endif
+ check_mtree_alloc_rrange(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+ }
+
+ mt_set_non_kernel(1);
+ mtree_erase(mt, 34148798727); /* create a deleted range. */
+ mtree_erase(mt, 34148798725);
+ check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
+ 34148798725, 0, mt);
+
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_alloc_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps|awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ static const unsigned long range[] = {
+ /* Inclusive , Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+ static const unsigned long holes[] = {
+ /* Start of hole, end of hole, size of hole (+1) */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+ /*
+ * req_range consists of 5 values per test case:
+ * 1. min index
+ * 2. max index
+ * 3. size
+ * 4. address that should be returned
+ * 5. expected return value
+ */
+ static const unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+ 0x565234afb000, /* First hole in our data of size 0x1000. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1F00, /* Size */
+ 0x0, /* First hole in our data of size 2000. */
+ 0, /* Return value success. */
+
+ /* Test ascend. */
+ 34148797436 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x3000, /* Size */
+ 34148798629 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test failing. */
+ 34148798623 << 12, /* Min */
+ 34148798683 << 12, /* Max */
+ 0x15000, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failed. */
+
+ /* Test filling entire gap. */
+ 34148798623 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x10000, /* Size */
+ 34148798632 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test walking off the end of root. */
+ 0, /* Min */
+ -1, /* Max */
+ -1, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failure. */
+
+ /* Test looking for too large a hole across entire range. */
+ 0, /* Min */
+ -1, /* Max */
+ 4503599618982063UL << 12, /* Size */
+ 34359052178 << 12, /* Expected location */
+ -EBUSY, /* Return failure. */
+
+ /* Test a single entry */
+ 34148798648 << 12, /* Min */
+ 34148798648 << 12, /* Max */
+ 4096, /* Size of 1 */
+ 34148798648 << 12, /* Location is the same as min/max */
+ 0, /* Success */
+ };
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0x565234af2000;
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ for (i = 0; i < range_count; i += 2) {
+#define DEBUG_ALLOC_RANGE 0
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+ mt_dump(mt, mt_dump_hex);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+
+
+ mas_lock(&mas);
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12,
+ holes[i+1] >> 12, holes[i+2] >> 12,
+ min, holes[i+1]);
+#endif
+ MT_BUG_ON(mt, mas_empty_area(&mas, min >> 12,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+ MT_BUG_ON(mt, mas.index != holes[i] >> 12);
+ min = holes[i+1];
+ mas_reset(&mas);
+ }
+ mas_unlock(&mas);
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
+ i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
+ req_range[i + 2] >> 12, req_range[i + 3] >> 12,
+ req_range[i], req_range[i+1]);
+#endif
+ check_mtree_alloc_range(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+#if DEBUG_ALLOC_RANGE
+ mt_dump(mt, mt_dump_hex);
+#endif
+ }
+
+ mtree_destroy(mt);
+}
+#endif
+
+static noinline void __init check_ranges(struct maple_tree *mt)
+{
+ int i, val, val2;
+ static const unsigned long r[] = {
+ 10, 15,
+ 20, 25,
+ 17, 22, /* Overlaps previous range. */
+ 9, 1000, /* Huge. */
+ 100, 200,
+ 45, 168,
+ 118, 128,
+ };
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0);
+ check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0);
+ check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST);
+ MT_BUG_ON(mt, !mt_height(mt));
+ /* Store */
+ check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0);
+ check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0);
+ check_store_range(mt, r[8], r[9], xa_mk_value(r[8]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ MT_BUG_ON(mt, mt_height(mt));
+
+ check_seq(mt, 50, false);
+ mt_set_non_kernel(4);
+ check_store_range(mt, 5, 47, xa_mk_value(47), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-100 */
+ check_seq(mt, 100, false);
+ /* Store 45-168 */
+ mt_set_non_kernel(10);
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-200 */
+ check_seq(mt, 200, false);
+ /* Store 45-168 */
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ check_seq(mt, 30, false);
+ check_store_range(mt, 6, 18, xa_mk_value(6), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Overwrite across multiple levels. */
+ /* Create tree of 1-400 */
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ /* Store 118-128 */
+ check_store_range(mt, r[12], r[13], xa_mk_value(r[12]), 0);
+ mt_set_non_kernel(50);
+ mtree_test_erase(mt, 140);
+ mtree_test_erase(mt, 141);
+ mtree_test_erase(mt, 142);
+ mtree_test_erase(mt, 143);
+ mtree_test_erase(mt, 130);
+ mtree_test_erase(mt, 131);
+ mtree_test_erase(mt, 132);
+ mtree_test_erase(mt, 133);
+ mtree_test_erase(mt, 134);
+ mtree_test_erase(mt, 135);
+ check_load(mt, r[12], xa_mk_value(r[12]));
+ check_load(mt, r[13], xa_mk_value(r[12]));
+ check_load(mt, r[13] - 1, xa_mk_value(r[12]));
+ check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1));
+ check_load(mt, 135, NULL);
+ check_load(mt, 140, NULL);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+
+
+ /* Overwrite multiple levels at the end of the tree (slot 7) */
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ check_store_range(mt, 347, 352, xa_mk_value(347), 0);
+
+ check_load(mt, 346, xa_mk_value(346));
+ for (i = 347; i <= 352; i++)
+ check_load(mt, i, xa_mk_value(347));
+ for (i = 353; i <= 361; i++)
+ check_load(mt, i, xa_mk_value(353));
+ check_load(mt, 362, xa_mk_value(362));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 363, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 363; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 364, NULL);
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(5);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 364, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 364; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 362, 367, xa_mk_value(362), 0);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ /*
+ * Interesting cases:
+ * 1. Overwrite the end of a node and end in the first entry of the next
+ * node.
+ * 2. Split a single range
+ * 3. Overwrite the start of a range
+ * 4. Overwrite the end of a range
+ * 5. Overwrite the entire range
+ * 6. Overwrite a range that causes multiple parent nodes to be
+ * combined
+ * 7. Overwrite a range that causes multiple parent nodes and part of
+ * root to be combined
+ * 8. Overwrite the whole tree
+ * 9. Try to overwrite the zero entry of an alloc tree.
+ * 10. Write a range larger than a node's current pivot
+ */
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0);
+ check_store_range(mt, 2411, 2411, xa_mk_value(2411), 0);
+ check_store_range(mt, 2412, 2412, xa_mk_value(2412), 0);
+ check_store_range(mt, 2396, 2400, xa_mk_value(4052020), 0);
+ check_store_range(mt, 2402, 2402, xa_mk_value(2402), 0);
+ mtree_destroy(mt);
+ mt_set_non_kernel(0);
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0);
+ check_store_range(mt, 2424, 2424, xa_mk_value(2424), 0);
+ check_store_range(mt, 2425, 2425, xa_mk_value(2), 0);
+ check_store_range(mt, 2460, 2470, NULL, 0);
+ check_store_range(mt, 2435, 2460, xa_mk_value(2435), 0);
+ check_store_range(mt, 2461, 2470, xa_mk_value(2461), 0);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Check in-place modifications */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ /* Append to the start of last range */
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i * 5 + 1;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Append to the last range without touching any boundaries */
+ for (i = 0; i < 10; i++) {
+ val = val2 + 5;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Append to the end of last range */
+ val = val2;
+ for (i = 0; i < 10; i++) {
+ val += 5;
+ MT_BUG_ON(mt, mtree_test_store_range(mt, val, ULONG_MAX,
+ xa_mk_value(val)) != 0);
+ }
+
+ /* Overwriting the range and over a part of the next range */
+ for (i = 10; i < 30; i += 2) {
+ val = i * 5 + 1;
+ val2 = val + 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Overwriting a part of the range and over the next range */
+ for (i = 50; i < 70; i += 2) {
+ val2 = i * 5;
+ val = val2 - 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ for (i = 100; i < 130; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges, in RCU mode
+ */
+ mt_set_in_rcu(mt);
+ for (i = 150; i < 180; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /* Test rebalance gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 50; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 161, 161, xa_mk_value(161), 0);
+ check_store_range(mt, 162, 162, xa_mk_value(162), 0);
+ check_store_range(mt, 163, 163, xa_mk_value(163), 0);
+ check_store_range(mt, 240, 249, NULL, 0);
+ mtree_erase(mt, 200);
+ mtree_erase(mt, 210);
+ mtree_erase(mt, 220);
+ mtree_erase(mt, 230);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4600, 4959, xa_mk_value(1), 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0);
+ check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0);
+ check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0);
+ check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0);
+ check_store_range(mt, 4842, 4849, NULL, 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1300; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Cause a 3 child split all the way up the tree. */
+ for (i = 5; i < 215; i += 10)
+ check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
+ for (i = 5; i < 65; i += 10)
+ check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ for (i = 5; i < 45; i += 10)
+ check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
+ if (!MAPLE_32BIT)
+ MT_BUG_ON(mt, mt_height(mt) < 4);
+ mtree_destroy(mt);
+
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1200; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Fill parents and leaves before split. */
+ for (i = 5; i < 455; i += 10)
+ check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+
+ for (i = 1; i < 16; i++)
+ check_store_range(mt, 8185 + i, 8185 + i + 1,
+ xa_mk_value(8185+i), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ /* triple split across multiple levels. */
+ check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
+ if (!MAPLE_32BIT)
+ MT_BUG_ON(mt, mt_height(mt) != 4);
+}
+
+static noinline void __init check_next_entry(struct maple_tree *mt)
+{
+ void *entry = NULL;
+ unsigned long limit = 30, i = 0;
+ MA_STATE(mas, mt, i, i);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ check_seq(mt, limit, false);
+ rcu_read_lock();
+
+ /* Check the first one and get ma_state in the correct state. */
+ MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++));
+ for ( ; i <= limit + 1; i++) {
+ entry = mas_next(&mas, limit);
+ if (i > limit)
+ MT_BUG_ON(mt, entry != NULL);
+ else
+ MT_BUG_ON(mt, xa_mk_value(i) != entry);
+ }
+ rcu_read_unlock();
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_prev_entry(struct maple_tree *mt)
+{
+ unsigned long index = 16;
+ void *value;
+ int i;
+
+ MA_STATE(mas, mt, index, index);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, 30, false);
+
+ rcu_read_lock();
+ value = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, value != xa_mk_value(index));
+ value = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, value != xa_mk_value(index - 1));
+ rcu_read_unlock();
+ mtree_destroy(mt);
+
+ /* Check limits on prev */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ for (i = 0; i <= index; i++) {
+ mas_set_range(&mas, i*10, i*10+5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ mas_set(&mas, 20);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(2));
+
+ value = mas_prev(&mas, 19);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_set(&mas, 80);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(8));
+
+ value = mas_prev(&mas, 76);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_unlock(&mas);
+}
+
+static noinline void __init check_root_expand(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *ptr;
+
+
+ mas_lock(&mas);
+ mas_set(&mas, 3);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ ptr = &check_prev_entry;
+ mas_set(&mas, 1);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 0);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+
+ mas_set(&mas, 1);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+
+ mas_set(&mas, 2);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = &check_prev_entry;
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 5);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ mas_set_range(&mas, 0, 100);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+ MT_BUG_ON(mt, mas.last != 0);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 1UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 1UL));
+
+ mas_unlock(&mas);
+
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 2UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != ULONG_MAX) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 2UL));
+
+
+ mas_unlock(&mas);
+}
+
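+/*
+ * Erase entries to create gaps in single- and multi-level trees, then check
+ * that mas_empty_area_rev() finds the combined gaps.
+ */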
+static noinline void __init check_gap_combining(struct maple_tree *mt)
+{
+ struct maple_enode *mn1, *mn2;
+ void *entry;
+ unsigned long singletons = 100;
+ static const unsigned long *seq100;
+ static const unsigned long seq100_64[] = {
+ /* 0-5 */
+ 74, 75, 76,
+ 50, 100, 2,
+
+ /* 6-12 */
+ 44, 45, 46, 43,
+ 20, 50, 3,
+
+ /* 13-20*/
+ 80, 81, 82,
+ 76, 2, 79, 85, 4,
+ };
+
+ static const unsigned long seq100_32[] = {
+ /* 0-5 */
+ 61, 62, 63,
+ 50, 100, 2,
+
+ /* 6-12 */
+ 31, 32, 33, 30,
+ 20, 50, 3,
+
+ /* 13-20*/
+ 80, 81, 82,
+ 76, 2, 79, 85, 4,
+ };
+
+ static const unsigned long seq2000[] = {
+ 1152, 1151,
+ 1100, 1200, 2,
+ };
+ static const unsigned long seq400[] = {
+ 286, 318,
+ 256, 260, 266, 270, 275, 280, 290, 398,
+ 286, 310,
+ };
+
+ unsigned long index;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ if (MAPLE_32BIT)
+ seq100 = seq100_32;
+ else
+ seq100 = seq100_64;
+
+ index = seq100[0];
+ mas_set(&mas, index);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, singletons, false); /* create 100 singletons. */
+
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq100[2]);
+ check_load(mt, seq100[2], NULL);
+ mtree_test_erase(mt, seq100[1]);
+ check_load(mt, seq100[1], NULL);
+
+ rcu_read_lock();
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the test. */
+
+ /*
+ * At this point, there is a gap of 2 at index + 1 between seq100[3] and
+ * seq100[4]. Search for the gap.
+ */
+ mt_set_non_kernel(1);
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[3], seq100[4],
+ seq100[5]));
+ MT_BUG_ON(mt, mas.index != index + 1);
+ rcu_read_unlock();
+
+ mtree_test_erase(mt, seq100[6]);
+ check_load(mt, seq100[6], NULL);
+ mtree_test_erase(mt, seq100[7]);
+ check_load(mt, seq100[7], NULL);
+ mtree_test_erase(mt, seq100[8]);
+ index = seq100[9];
+
+ rcu_read_lock();
+ mas.index = index;
+ mas.last = index;
+ mas_reset(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mas_next(&mas, ULONG_MAX); /* go to the next entry. */
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the next entry is in the next node. */
+
+ /*
+ * At this point, there is a gap of 3 at seq100[6]. Find it by
+ * searching 20 - 50 for size 3.
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[10], seq100[11],
+ seq100[12]));
+ MT_BUG_ON(mt, mas.index != seq100[6]);
+ rcu_read_unlock();
+
+ mt_set_non_kernel(1);
+ mtree_store(mt, seq100[13], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], xa_mk_value(seq100[14]));
+ mtree_store(mt, seq100[14], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], NULL);
+
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[15],
+ seq100[17]));
+ MT_BUG_ON(mt, mas.index != seq100[13]);
+ mt_validate(mt);
+ rcu_read_unlock();
+
+ /*
+ * *DEPRECATED: no retries anymore* Test a retry entry at the start of a
+ * gap.
+ */
+ mt_set_non_kernel(2);
+ mtree_test_store_range(mt, seq100[18], seq100[14], NULL);
+ mtree_test_erase(mt, seq100[15]);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[19],
+ seq100[20]));
+ rcu_read_unlock();
+ MT_BUG_ON(mt, mas.index != seq100[18]);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 2000 tests are for multi-level tree gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 2000, false);
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq2000[0]);
+ mtree_test_erase(mt, seq2000[1]);
+
+ mt_set_non_kernel(2);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq2000[2], seq2000[3],
+ seq2000[4]));
+ MT_BUG_ON(mt, mas.index != seq2000[1]);
+ rcu_read_unlock();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 400 tests rebalancing over two levels. */
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mtree_test_store_range(mt, seq400[0], seq400[1], NULL);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ mtree_test_store_range(mt, seq400[2], seq400[9],
+ xa_mk_value(seq400[2]));
+ mtree_test_store_range(mt, seq400[3], seq400[9],
+ xa_mk_value(seq400[3]));
+ mtree_test_store_range(mt, seq400[4], seq400[9],
+ xa_mk_value(seq400[4]));
+ mtree_test_store_range(mt, seq400[5], seq400[9],
+ xa_mk_value(seq400[5]));
+ mtree_test_store_range(mt, seq400[0], seq400[9],
+ xa_mk_value(seq400[0]));
+ mtree_test_store_range(mt, seq400[6], seq400[9],
+ xa_mk_value(seq400[6]));
+ mtree_test_store_range(mt, seq400[7], seq400[9],
+ xa_mk_value(seq400[7]));
+ mtree_test_store_range(mt, seq400[8], seq400[9],
+ xa_mk_value(seq400[8]));
+ mtree_test_store_range(mt, seq400[10], seq400[11],
+ xa_mk_value(seq400[10]));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+}
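+
+/*
+ * Store many ranges, then overwrite a large span with NULL to force whole
+ * nodes to be rewritten.
+ */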
+static noinline void __init check_node_overwrite(struct maple_tree *mt)
+{
+ int i, max = 4000;
+
+ for (i = 0; i < max; i++)
+ mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100));
+
+ mtree_test_store_range(mt, 319951, 367950, NULL);
+ /*mt_dump(mt, mt_dump_dec); */
+ mt_validate(mt);
+}
+
+#if defined(BENCH_SLOT_STORE)
+static noinline void __init bench_slot_store(struct maple_tree *mt)
+{
+ int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, brk, brk, NULL, GFP_KERNEL);
+ mtree_store_range(mt, brk_start, brk, xa_mk_value(brk),
+ GFP_KERNEL);
+ }
+}
+#endif
+
+#if defined(BENCH_NODE_STORE)
+static noinline void __init bench_node_store(struct maple_tree *mt)
+{
+ int i, overwrite = 76, max = 240, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, overwrite, overwrite + 15,
+ xa_mk_value(overwrite), GFP_KERNEL);
+
+ overwrite += 5;
+ if (overwrite >= 135)
+ overwrite = 76;
+ }
+}
+#endif
+
+#if defined(BENCH_AWALK)
+static noinline void __init bench_awalk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 50000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_empty_area_rev(&mas, 0, 2000, 10);
+ mas_reset(&mas);
+ }
+}
+#endif
+#if defined(BENCH_WALK)
+static noinline void __init bench_walk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_walk(&mas);
+ mas_reset(&mas);
+ }
+
+}
+#endif
+
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++)
+ mtree_load(mt, 1470);
+}
+#endif
+
+#if defined(BENCH_MT_FOR_EACH)
+static noinline void __init bench_mt_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500, index = 0;
+ void *entry;
+
+ for (i = 0; i < max; i += 5)
+ mtree_store_range(mt, i, i + 4, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+
+ index = 0;
+ }
+
+}
+#endif
+
+#if defined(BENCH_MAS_FOR_EACH)
+static noinline void __init bench_mas_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mas_for_each(&mas, entry, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+ mas_set(&mas, 0);
+ }
+ rcu_read_unlock();
+
+}
+#endif
+#if defined(BENCH_MAS_PREV)
+static noinline void __init bench_mas_prev(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 2495;
+
+ mas_set(&mas, ULONG_MAX);
+ while ((entry = mas_prev(&mas, 0)) != NULL) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j -= 5;
+ }
+ }
+ rcu_read_unlock();
+
+}
+#endif
+/* check_forking - simulate the kernel forking sequence with the tree. */
+static noinline void __init check_forking(void)
+{
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, ret;
+ void *val;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+
+ init_rwsem(&mt_lock);
+ init_rwsem(&newmt_lock);
+
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
+
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
+ pr_err("OOM!");
+ BUG_ON(1);
+ }
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
+ mas_store(&newmas, val);
+
+ mas_destroy(&newmas);
+ mas_destroy(&mas);
+ mt_validate(&newmt);
+ __mt_destroy(&newmt);
+ __mt_destroy(&mt);
+ up_write(&newmt_lock);
+ up_write(&mt_lock);
+}
+
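+/*
+ * Modify entries from within a mas_for_each() loop and check that iteration
+ * continues from the correct next entry.
+ */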
+static noinline void __init check_iteration(struct maple_tree *mt)
+{
+ int i, nr_entries = 125;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i * 10, i * 10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+
+ i = 0;
+ mas_lock(&mas);
+ mas_for_each(&mas, val, 925) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite end of entry 92 */
+ if (i == 92) {
+ mas.index = 925;
+ mas.last = 929;
+ mas_store(&mas, val);
+ }
+ i++;
+ }
+ /* Ensure mas_find() gets the next value */
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(i));
+
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 785) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite start of entry 78 */
+ if (i == 78) {
+ mas.index = 780;
+ mas.last = 785;
+ mas_store(&mas, val);
+ } else {
+ i++;
+ }
+ }
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(i));
+
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 765) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite end of entry 76 and advance to the end */
+ if (i == 76) {
+ mas.index = 760;
+ mas.last = 765;
+ mas_store(&mas, val);
+ }
+ i++;
+ }
+ /* Make sure the next find returns the one after 765, 766-769 */
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(76));
+ mas_unlock(&mas);
+ mas_destroy(&mas);
+ mt_set_non_kernel(0);
+}
+
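+/*
+ * Copy every entry of an existing tree into a new tree with mas_store_gfp()
+ * and validate the result.
+ */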
+static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 135;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ rcu_read_lock();
+ mas_lock(&newmas);
+ mas_reset(&newmas);
+ mas_set(&mas, 0);
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store_gfp(&newmas, val, GFP_KERNEL);
+ }
+ mas_unlock(&newmas);
+ rcu_read_unlock();
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ mtree_destroy(&newmt);
+}
+
+#if defined(BENCH_FORK)
+static noinline void __init bench_forking(void)
+{
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, nr_fork = 80000, ret;
+ void *val;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+
+ init_rwsem(&mt_lock);
+ init_rwsem(&newmt_lock);
+
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ for (i = 0; i < nr_fork; i++) {
+ mt_init_flags(&newmt,
+ MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
+ pr_err("OOM!");
+ BUG_ON(1);
+ }
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
+ mas_store(&newmas, val);
+
+ mas_destroy(&newmas);
+ mt_validate(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
+ }
+ mas_destroy(&mas);
+ __mt_destroy(&mt);
+ up_write(&mt_lock);
+}
+#endif
+
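+/*
+ * Walk stored ranges forwards and backwards with mas_next() and mas_prev(),
+ * including across node boundaries and off either end of the tree.
+ */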
+static noinline void __init next_prev_test(struct maple_tree *mt)
+{
+ int i, nr_entries;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ struct maple_enode *mn;
+ static const unsigned long *level2;
+ static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720,
+ 725};
+ static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
+ 1760, 1765};
+ unsigned long last_index;
+
+ if (MAPLE_32BIT) {
+ nr_entries = 500;
+ level2 = level2_32;
+ last_index = 0x138e;
+ } else {
+ nr_entries = 200;
+ level2 = level2_64;
+ last_index = 0x7d6;
+ }
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mas_lock(&mas);
+ for (i = 0; i <= nr_entries / 2; i++) {
+ mas_next(&mas, 1000);
+ if (mas_is_none(&mas))
+ break;
+
+ }
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ i++;
+ }
+
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ mas_pause(&mas);
+ i++;
+ }
+
+ /*
+ * 680 - 685 = xa_mk_value(68)
+ * 686 - 689 = NULL
+ * 690 - 695 = xa_mk_value(69)
+ * Check simple next/prev.
+ */
+ mas_set(&mas, 686);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(680 / 10));
+ MT_BUG_ON(mt, mas.index != 680);
+ MT_BUG_ON(mt, mas.last != 685);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(700 / 10));
+ MT_BUG_ON(mt, mas.index != 700);
+ MT_BUG_ON(mt, mas.last != 705);
+
+ /* Check across node boundaries of the tree */
+ mas_set(&mas, 70);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(80 / 10));
+ MT_BUG_ON(mt, mas.index != 80);
+ MT_BUG_ON(mt, mas.last != 85);
+
+ val = mas_prev(&mas, 70);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ /* Check across two levels of the tree */
+ mas_reset(&mas);
+ mas_set(&mas, level2[0]);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+ val = mas_next(&mas, level2[1]);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
+ MT_BUG_ON(mt, mas.index != level2[2]);
+ MT_BUG_ON(mt, mas.last != level2[3]);
+ mn = mas.node;
+
+ val = mas_next(&mas, level2[1]);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[4] / 10));
+ MT_BUG_ON(mt, mas.index != level2[4]);
+ MT_BUG_ON(mt, mas.last != level2[5]);
+ MT_BUG_ON(mt, mn == mas.node);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
+ MT_BUG_ON(mt, mas.index != level2[2]);
+ MT_BUG_ON(mt, mas.last != level2[3]);
+
+ /* Check running off the end and back on */
+ mas_set(&mas, nr_entries * 10);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
+ MT_BUG_ON(mt, mas.index != (nr_entries * 10));
+ MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
+
+ val = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != last_index);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
+ MT_BUG_ON(mt, mas.index != (nr_entries * 10));
+ MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
+
+ /* Check running off the start and back on */
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ MT_BUG_ON(mt, mas.index != 10);
+ MT_BUG_ON(mt, mas.last != 15);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ mas.index = 0;
+ mas.last = 5;
+ mas_store(&mas, NULL);
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ mas_walk(&mas);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 9);
+ mas_unlock(&mas);
+
+ mtree_destroy(mt);
+
+ mt_init(mt);
+ mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
+ mtree_store_range(mt, 5, 5, xa_mk_value(5), GFP_KERNEL);
+ rcu_read_lock();
+ mas_set(&mas, 5);
+ val = mas_prev(&mas, 4);
+ MT_BUG_ON(mt, val != NULL);
+ rcu_read_unlock();
+}
+
+
+
+/* Test spanning writes that require balancing right sibling or right cousin */
+static noinline void __init check_spanning_relatives(struct maple_tree *mt)
+{
+
+ unsigned long i, nr_entries = 1000;
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+
+ mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
+}
+
+static noinline void __init check_fuzzer(struct maple_tree *mt)
+{
+ /*
+ * 1. Causes a spanning rebalance of a single root node.
+ * Fixed by setting the correct limit in mast_cp_to_nodes() when the
+ * entire right side is consumed.
+ */
+ mtree_test_insert(mt, 88, (void *)0xb1);
+ mtree_test_insert(mt, 84, (void *)0xa9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 14, (void *)0x1d);
+ mtree_test_insert(mt, 7, (void *)0xf);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_store_range(mt, 8, 18, (void *)0x11);
+ mtree_destroy(mt);
+
+ /*
+ * 2. Cause a spanning rebalance of two nodes in root.
+ * Fixed by setting mast->r->max correctly.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 87, (void *)0xaf);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 4);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_store(mt, 44, (void *)0x59);
+ mtree_test_store(mt, 68, (void *)0x89);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 43, (void *)0x57);
+ mtree_test_insert(mt, 24, (void *)0x31);
+ mtree_test_insert(mt, 844, (void *)0x699);
+ mtree_test_store(mt, 84, (void *)0xa9);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 4);
+ mtree_test_load(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+	 * 3. Cause a node overflow on copy.
+	 * Fixed by using the correct check for node size in mas_wr_modify().
+	 * Also discovered an issue with metadata setting.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store_range(mt, 0, ULONG_MAX, (void *)0x1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 444, (void *)0x379);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+	 * 4. Spanning store failure due to writing an incorrect pivot value at
+	 * the last slot.
+	 * Fixed by setting mast->r->max correctly in mast_cp_to_nodes().
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 261, (void *)0x20b);
+ mtree_test_store(mt, 516, (void *)0x409);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1256, (void *)0x9d1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 1);
+ mtree_test_store(mt, 56, (void *)0x71);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 24, (void *)0x31);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2263, (void *)0x11af);
+ mtree_test_insert(mt, 446, (void *)0x37d);
+ mtree_test_store_range(mt, 6, 45, (void *)0xd);
+ mtree_test_store_range(mt, 3, 446, (void *)0x7);
+ mtree_destroy(mt);
+
+ /*
+ * 5. mas_wr_extend_null() may overflow slots.
+ * Fix by checking against wr_mas->node_end.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 48, (void *)0x61);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 88, (void *)0xb1);
+ mtree_test_store(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2480, (void *)0x1361);
+ mtree_test_insert(mt, ULONG_MAX,
+ (void *)0xffffffffffffffff);
+ mtree_test_erase(mt, ULONG_MAX);
+ mtree_destroy(mt);
+
+ /*
+ * 6. When reusing a node with an implied pivot and the node is
+	 * shrinking, old data would be left in the implied slot.
+	 * Fixed by checking the last pivot against mas->max and clearing
+ * accordingly. This only affected the left-most node as that node is
+ * the only one allowed to end in NULL.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 15, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_insert(mt, 122, (void *)0xf5);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 0, 1, (void *)0x1);
+ mtree_test_insert(mt, 85, (void *)0xab);
+ mtree_test_insert(mt, 72, (void *)0x91);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 726, (void *)0x5ad);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 51, (void *)0x67);
+ mtree_test_insert(mt, 611, (void *)0x4c7);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 26, 1, (void *)0x35);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 621, (void *)0x4db);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+	 * 7. The previous fix was incomplete; fix mas_reuse_node() clearing of old
+ * data by overwriting it first - that way metadata is of no concern.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_load(mt, 1);
+ mtree_test_insert(mt, 102, (void *)0xcd);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 110, (void *)0xdd);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 5, 0, (void *)0xb);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 112, (void *)0xe1);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 110, 2, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 210, (void *)0x1a5);
+ mtree_test_store_range(mt, 0, 2, (void *)0x1);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 22);
+ mtree_test_erase(mt, 1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_store_range(mt, 110, 12, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 110);
+ mtree_test_insert_range(mt, 4, 71, (void *)0x9);
+ mtree_test_load(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 11, 22, (void *)0x17);
+ mtree_test_erase(mt, 12);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_destroy(mt);
+
+ /*
+ * 8. When rebalancing or spanning_rebalance(), the max of the new node
+ * may be set incorrectly to the final pivot and not the right max.
+ * Fix by setting the left max to orig right max if the entire node is
+ * consumed.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 6716, (void *)0x3479);
+ mtree_test_store(mt, 61, (void *)0x7b);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 7167, (void *)0xd);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 667167);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 67167, (void *)0x20cbf);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 7);
+ mtree_test_insert(mt, 16, (void *)0x21);
+ mtree_test_insert(mt, 36, (void *)0x49);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 367, (void *)0x2df);
+ mtree_test_insert(mt, 115, (void *)0xe7);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 1, 3, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 47, (void *)0xd);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 67, (void *)0x3);
+ mtree_test_load(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_destroy(mt);
+
+ /*
+	 * 9. A spanning store to the end of the data caused an invalid metadata
+	 * length which eventually resulted in a crash.
+ * Fix by checking if there is a value in pivot before incrementing the
+ * metadata end in mab_mas_cp(). To ensure this doesn't happen again,
+	 * abstract the two locations where this happens into a function called
+ * mas_leaf_set_meta().
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 91, (void *)0xb7);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_store_range(mt, 0, 128, (void *)0x1);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 8);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, ULONG_MAX - 10);
+ mtree_test_store_range(mt, 0, 281, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 1211, (void *)0x977);
+ mtree_test_insert(mt, 111, (void *)0xdf);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_insert(mt, 211, (void *)0x1a7);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1218, (void *)0x985);
+ mtree_test_insert(mt, 61, (void *)0x7b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 121, (void *)0xf3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, ULONG_MAX - 10);
+}
+
+/* duplicate the tree with a specific gap */
+static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ unsigned long nr_entries, bool zero_start,
+ unsigned long gap)
+{
+ unsigned long i = 0;
+ struct maple_tree newmt;
+ int ret;
+ void *tmp;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ if (!zero_start)
+ i = 1;
+
+ mt_zero_nr_tallocated();
+ for (; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_non_kernel(99999);
+ down_write(&newmt_lock);
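+	/*
+	 * Preallocate nodes for the expected number of entries so that the
+	 * mas_store() calls in the copy loop below do not need to allocate.
+	 */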
+ ret = mas_expected_entries(&newmas, nr_entries);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, ret != 0);
+
+ rcu_read_lock();
+ mas_for_each(&mas, tmp, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, tmp);
+ }
+ rcu_read_unlock();
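+	/* Free any nodes preallocated above that the copy did not consume */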
+ mas_destroy(&newmas);
+
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
+}
+
+/* Duplicate many sizes of trees. Mainly to test expected entry values */
+static noinline void __init check_dup(struct maple_tree *mt)
+{
+ int i;
+ int big_start = 100010;
+
+ /* Check with a value at zero */
+ for (i = 10; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Check with a value at zero, no gap */
+ for (i = 1000; i < 2000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 0);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Check with a value at zero and unreasonably large */
+ for (i = big_start; i < big_start + 10; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Small to medium size not starting at zero*/
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Unreasonably large not starting at zero*/
+ for (i = big_start; i < big_start + 10; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ mt_cache_shrink();
+ }
+
+ /* Check non-allocation tree not starting at zero */
+ for (i = 1500; i < 3000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ if (i % 2 == 0)
+ mt_cache_shrink();
+ }
+
+ mt_cache_shrink();
+ /* Check non-allocation tree starting at zero */
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ }
+
+ mt_cache_shrink();
+ /* Unreasonably large */
+ for (i = big_start + 5; i < big_start + 10; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ mt_cache_shrink();
+ cond_resched();
+ }
+}
+
+static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
+{
+ int i = 50;
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_set_non_kernel(9999);
+ mas_lock(&mas);
+ do {
+ mas_set_range(&mas, i*10, i*10+9);
+ mas_store(&mas, check_bnode_min_spanning);
+ } while (i--);
+
+ mas_set_range(&mas, 240, 509);
+ mas_store(&mas, NULL);
+ mas_unlock(&mas);
+ mas_destroy(&mas);
+ mt_set_non_kernel(0);
+}
+
+static noinline void __init check_empty_area_window(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 20;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 1; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* Create another hole besides the one at 0 */
+ mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+ /* Check lower bounds that don't fit */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+ /* Check lower bound that does fit */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+ MT_BUG_ON(mt, mas.index != 5);
+ MT_BUG_ON(mt, mas.last != 9);
+ rcu_read_unlock();
+
+ /* Check one gap that doesn't fit and one that does */
+ rcu_read_lock();
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 161);
+ MT_BUG_ON(mt, mas.last != 169);
+
+ /* Check one gap that does fit above the min */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+ MT_BUG_ON(mt, mas.index != 216);
+ MT_BUG_ON(mt, mas.last != 218);
+
+ /* Check size that doesn't fit any gap */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the lower end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the upper end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+ /* Check mas_empty_area forward */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 8);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL);
+
+ mas_reset(&mas);
+ mas_empty_area(&mas, 100, 165, 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+ rcu_read_unlock();
+}
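+
+/*
+ * A minimal sketch of the allocation pattern the checks above exercise,
+ * assuming only the mas_* calls already used in this file: find a free gap
+ * of @size within [@min, @max] and claim it with @entry.  The helper name is
+ * illustrative and mirrors what check_empty_area_fill() does below.
+ */
+static inline int example_claim_gap(struct maple_tree *mt, unsigned long min,
+		unsigned long max, unsigned long size, void *entry)
+{
+	int ret;
+	MA_STATE(mas, mt, 0, 0);
+
+	mas_lock(&mas);
+	ret = mas_empty_area(&mas, min, max, size);
+	if (!ret)	/* gap found: mas spans [mas.index, mas.index + size - 1] */
+		ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+	mas_unlock(&mas);
+	return ret;
+}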
+
+static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+{
+ const unsigned long max = 0x25D78000;
+ unsigned long size;
+ int loop, shift;
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_set_non_kernel(99999);
+ for (shift = 12; shift <= 16; shift++) {
+ loop = 5000;
+ size = 1 << shift;
+ while (loop--) {
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
+ MT_BUG_ON(mt, mas.last != mas.index + size - 1);
+ mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
+ mas_unlock(&mas);
+ mas_reset(&mas);
+ }
+ }
+
+ /* No space left. */
+ size = 0x1000;
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
+ rcu_read_unlock();
+
+ /* Fill a depth 3 node to the maximum */
+ for (unsigned long i = 629440511; i <= 629440800; i += 6)
+ mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
+ /* Make space in the second-last depth 4 node */
+ mtree_erase(mt, 631668735);
+ /* Make space in the last depth 4 node */
+ mtree_erase(mt, 629506047);
+ mas_reset(&mas);
+ /* Search from just after the gap in the second-last depth 4 */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
+ rcu_read_unlock();
+ mt_set_non_kernel(0);
+}
+
+/*
+ * Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions.
+ *
+ * The table below shows the single entry tree (0-0 pointer) and normal tree
+ * with nodes.
+ *
+ * Function ENTRY Start Result index & last
+ * ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ └─ the final range
+ * │ │ │ └─ The node value after execution
+ * │ │ └─ The node value before execution
+ * │                 └─ If the entry exists or does not exist (DNE)
+ * └─ The function name
+ *
+ * Function ENTRY Start Result index & last
+ * mas_next()
+ * - after last
+ * Single entry tree at 0-0
+ * ------------------------
+ * DNE MAS_START MAS_NONE 1 - oo
+ * DNE MAS_PAUSE MAS_NONE 1 - oo
+ * DNE MAS_ROOT MAS_NONE 1 - oo
+ * when index = 0
+ * DNE MAS_NONE MAS_ROOT 0
+ * when index > 0
+ * DNE MAS_NONE MAS_NONE 1 - oo
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to last range
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to last range
+ * exists MAS_NONE active range
+ * exists active active range
+ * DNE active active set to last range
+ * ERANGE active MAS_OVERFLOW last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_prev()
+ * - before index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range
+ * DNE MAS_NONE MAS_NONE set to min
+ * any MAS_ROOT MAS_NONE 0
+ * exists active active range
+ * DNE active active last range
+ * ERANGE active MAS_UNDERFLOW last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find()
+ * - at index or next
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 1
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to max
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to max
+ * exists MAS_NONE active range (start at last)
+ * exists active active range
+ * DNE active active last range (max < last)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find_rev()
+ * - at index or before
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range (start at index)
+ * exists active active range
+ * DNE active active last range (min > index)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_walk()
+ * - Look up index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_ROOT 1 - oo
+ * DNE MAS_PAUSE MAS_ROOT 1 - oo
+ * DNE MAS_NONE MAS_ROOT 1 - oo
+ * DNE MAS_ROOT MAS_ROOT 1 - oo
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * exists MAS_ROOT MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active range of NULL
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active range of NULL
+ * exists MAS_NONE active range
+ * DNE MAS_NONE active range of NULL
+ * exists active active range
+ * DNE active active range of NULL
+ */
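+
+/*
+ * A short, illustrative sketch of the read-side pattern the table above
+ * describes, assuming only the mas_* helpers already used in this file:
+ * iterate under RCU and use mas_pause() to drop the lock periodically.
+ * The helper name and the "every 64 entries" policy are arbitrary examples.
+ */
+static inline unsigned long example_count_entries(struct maple_tree *mt)
+{
+	unsigned long count = 0;
+	void *entry;
+	MA_STATE(mas, mt, 0, 0);
+
+	rcu_read_lock();
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		count++;	/* entry covers [mas.index, mas.last] */
+		if (!(count % 64)) {
+			mas_pause(&mas); /* next mas_find() re-walks from here */
+			rcu_read_unlock();
+			cond_resched();
+			rcu_read_lock();
+		}
+	}
+	rcu_read_unlock();
+	return count;
+}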
+
+static noinline void __init check_state_handling(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *entry, *ptr = (void *) 0x1234500;
+ void *ptr2 = &ptr;
+ void *ptr3 = &ptr2;
+
+ /* Check MAS_ROOT First */
+ mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
+
+ mas_lock(&mas);
+ /* prev: Start -> underflow*/
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
+
+ /* prev: Start -> root */
+ mas_set(&mas, 10);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* prev: pause -> root */
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* next: start -> none */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* next: start -> none*/
+ mas_set(&mas, 10);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* find: root -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find: none -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find_rev: none -> root */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* find_rev: root -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find_rev: none -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 10);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* walk: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* walk: pause -> none*/
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* walk: none -> none */
+ mas.index = mas.last = 10;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* walk: none -> none */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* walk: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* walk: pause -> root */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* walk: none -> root */
+ mas.status = ma_none;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* walk: root -> root */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ /* walk: root -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.status != ma_none);
+
+ /* walk: none -> root */
+ mas.index = mas.last = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.status != ma_root);
+
+ mas_unlock(&mas);
+
+ /* Check when there is an actual node */
+ mtree_store_range(mt, 0, 0, NULL, GFP_KERNEL);
+ mtree_store_range(mt, 0x1000, 0x1500, ptr, GFP_KERNEL);
+ mtree_store_range(mt, 0x2000, 0x2500, ptr2, GFP_KERNEL);
+ mtree_store_range(mt, 0x3000, 0x3500, ptr3, GFP_KERNEL);
+
+ mas_lock(&mas);
+
+ /* next: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next: none ->active */
+ mas.index = mas.last = 0;
+ mas.offset = 0;
+ mas.status = ma_none;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next:active ->active (spanning limit) */
+ entry = mas_next(&mas, 0x2100);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next:active -> overflow (limit reached) beyond data */
+ entry = mas_next(&mas, 0x2999);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2fff);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
+
+ /* next:overflow -> active (limit changed) */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next:active -> overflow (limit reached) */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
+
+ /* next:overflow -> overflow */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
+
+ /* prev:overflow -> active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* next: none -> active, skip value at location */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ mas.status = ma_none;
+ mas.offset = 0;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* prev:active ->active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* prev:active -> underflow (span limit) */
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0x1200);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */
+ entry = mas_prev(&mas, 0x1200); /* underflow */
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow (lower limit) spanning end range */
+ entry = mas_prev(&mas, 0x0100);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* next:underflow -> active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* prev:first value -> underflow */
+ entry = mas_prev(&mas, 0x1000);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* find:underflow -> first value */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* prev: pause ->active */
+ mas_set(&mas, 0x3600);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* prev:active -> underflow spanning min */
+ entry = mas_prev(&mas, 0x1600);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1FFF);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev: active ->active, continue */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+	/* find: start ->active on value */
+ mas_set(&mas, 1200);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find:active ->active */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find:active -> active (NULL)*/
+ entry = mas_find(&mas, 0x2700);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2FFF);
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
+
+ /* find: overflow ->active */
+ entry = mas_find(&mas, 0x5000);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find:active -> active (NULL) end*/
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
+
+ /* find_rev: active (END) ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find_rev:active ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find_rev: pause ->active */
+ mas_pause(&mas);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* find_rev:active -> underflow */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* find_rev: start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1600);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk pause ->active */
+ mas_set(&mas, 0x1200);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk pause -> active */
+ mas_set(&mas, 0x1600);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1200);
+ mas.status = ma_none;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1600);
+ mas.status = ma_none;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1200;
+ mas.last = 0x1200;
+ mas.offset = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1600;
+ mas.last = 0x1600;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_is_active(&mas));
+
+ mas_unlock(&mas);
+}
+
+static DEFINE_MTREE(tree);
+static int __init maple_tree_seed(void)
+{
+ unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 5003, 5002};
+ void *ptr = &set;
+
+ pr_info("\nTEST STARTING\n\n");
+
+#if defined(BENCH_SLOT_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_slot_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_NODE_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_node_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_AWALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_awalk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_WALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_walk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_LOAD)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_load(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ bench_forking();
+ goto skip;
+#endif
+#if defined(BENCH_MT_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mt_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MAS_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MAS_PREV)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_prev(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_root_expand(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_iteration(&tree);
+ mtree_destroy(&tree);
+
+ check_forking();
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_mas_store_gfp(&tree);
+ mtree_destroy(&tree);
+
+ /* Test ranges (store and insert) */
+ mt_init_flags(&tree, 0);
+ check_ranges(&tree);
+ mtree_destroy(&tree);
+
+#if defined(CONFIG_64BIT)
+ /* These tests have ranges outside of 4GB */
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_range(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_rev_range(&tree);
+ mtree_destroy(&tree);
+#endif
+
+ mt_init_flags(&tree, 0);
+
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[9], &tree); /* Insert 0 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[10], ptr); /* Insert 5003 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[11], NULL); /* See if 5002 -> NULL */
+ check_load(&tree, set[10], ptr); /* See if 5003 -> ptr */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ /* Try to insert, insert a dup, and load back what was inserted. */
+ mt_init_flags(&tree, 0);
+ check_insert(&tree, set[0], &tree); /* Insert 5015 */
+ check_dup_insert(&tree, set[0], &tree); /* Insert 5015 again */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+
+ /*
+	 * The second set of tests tries to load a value that doesn't exist,
+	 * inserts a second value, then loads the value again.
+ */
+ check_load(&tree, set[1], NULL); /* See if 5014 -> NULL */
+ check_insert(&tree, set[1], ptr); /* insert 5014 -> ptr */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+ /*
+ * Tree currently contains:
+ * p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
+ */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* 1003 = &tree ? */
+
+ /* Clear out tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /* Test inserting into a NULL hole. */
+ check_insert(&tree, set[5], ptr); /* insert 1001 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_load(&tree, set[5], ptr); /* See if 1001 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* See if 1003 -> &tree */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /*
+ * set[] = {5015, 5014, 5017, 25, 1000,
+ * 1001, 1002, 1003, 1005, 0,
+ * 5003, 5002};
+ */
+
+ check_insert(&tree, set[0], ptr); /* 5015 */
+ check_insert(&tree, set[1], &tree); /* 5014 */
+ check_insert(&tree, set[2], ptr); /* 5017 */
+ check_insert(&tree, set[3], &tree); /* 25 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_insert(&tree, set[4], ptr); /* 1000 < Should split. */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree); /*25 */
+ check_load(&tree, set[4], ptr);
+ check_insert(&tree, set[5], &tree); /* 1001 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_insert(&tree, set[6], ptr);
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_insert(&tree, set[7], &tree);
+ check_load(&tree, set[0], ptr);
+ check_insert(&tree, set[8], ptr);
+
+ check_insert(&tree, set[9], &tree);
+
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_load(&tree, set[9], &tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 16, false);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ check_lower_bound_split(&tree);
+ check_upper_bound_split(&tree);
+ check_mid_split(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_next_entry(&tree);
+ check_find(&tree);
+ check_find_2(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_prev_entry(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_gap_combining(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_node_overwrite(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ next_prev_test(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_relatives(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_find(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_fuzzer(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_dup(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_bnode_min_spanning(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_window(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_fill(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_state_handling(&tree);
+ mtree_destroy(&tree);
+
+#if defined(BENCH)
+skip:
+#endif
+ rcu_barrier();
+ pr_info("maple_tree: %u of %u tests passed\n",
+ atomic_read(&maple_tree_tests_passed),
+ atomic_read(&maple_tree_tests_run));
+ if (atomic_read(&maple_tree_tests_run) ==
+ atomic_read(&maple_tree_tests_passed))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void __exit maple_tree_harvest(void)
+{
+
+}
+
+module_init(maple_tree_seed);
+module_exit(maple_tree_harvest);
+MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
new file mode 100644
index 000000000000..0dc173849a54
--- /dev/null
+++ b/lib/test_meminit.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for SL[AOU]B/page initialization at alloc/free time.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define GARBAGE_INT (0x09A7BA9E)
+#define GARBAGE_BYTE (0x9E)
+
+#define REPORT_FAILURES_IN_FN() \
+ do { \
+ if (failures) \
+ pr_info("%s failed %d out of %d times\n", \
+ __func__, failures, num_tests); \
+ else \
+ pr_info("all %d tests in %s passed\n", \
+ num_tests, __func__); \
+ } while (0)
+
+/* Count the number of non-zero (presumably uninitialized) bytes in the buffer. */
+static int __init count_nonzero_bytes(void *ptr, size_t size)
+{
+ int i, ret = 0;
+ unsigned char *p = (unsigned char *)ptr;
+
+ for (i = 0; i < size; i++)
+ if (p[i])
+ ret++;
+ return ret;
+}
+
+/* Fill a buffer with garbage, skipping the first |skip| bytes. */
+static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
+{
+ unsigned int *p = (unsigned int *)((char *)ptr + skip);
+ int i = 0;
+
+ WARN_ON(skip > size);
+ size -= skip;
+
+ while (size >= sizeof(*p)) {
+ p[i] = GARBAGE_INT;
+ i++;
+ size -= sizeof(*p);
+ }
+ if (size)
+ memset(&p[i], GARBAGE_BYTE, size);
+}
+
+static void __init fill_with_garbage(void *ptr, size_t size)
+{
+ fill_with_garbage_skip(ptr, size, 0);
+}
+
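+/*
+ * Allocate pages of the given order, fill them with garbage, free them, then
+ * allocate again: with alloc- or free-time initialization enabled the second
+ * allocation should read back as zeroes, so any non-zero byte counts as a
+ * failure.
+ */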
+static int __init do_alloc_pages_order(int order, int *total_failures)
+{
+ struct page *page;
+ void *buf;
+ size_t size = PAGE_SIZE << order;
+
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
+ buf = page_address(page);
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
+ buf = page_address(page);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test the page allocator by calling alloc_pages with different orders. */
+static int __init test_pages(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i;
+
+ for (i = 0; i < NR_PAGE_ORDERS; i++)
+ num_tests += do_alloc_pages_order(i, &failures);
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test kmalloc() with given parameters. */
+static int __init do_kmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test vmalloc() with given parameters. */
+static int __init do_vmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto err;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto err;
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
+static int __init test_kvmalloc(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 20; i++) {
+ size = 1 << i;
+ num_tests += do_kmalloc_size(size, &failures);
+ num_tests += do_vmalloc_size(size, &failures);
+ }
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+#define CTOR_BYTES (sizeof(unsigned int))
+#define CTOR_PATTERN (0x41414141)
+/* Initialize the first 4 bytes of the object. */
+static void test_ctor(void *obj)
+{
+ *(unsigned int *)obj = CTOR_PATTERN;
+}
+
+/*
+ * Check the invariants for the buffer allocated from a slab cache.
+ * If the cache has a test constructor, the first 4 bytes of the object must
+ * always remain equal to CTOR_PATTERN.
+ * If the cache isn't an RCU-typesafe one, or if the allocation is done with
+ * __GFP_ZERO, then the object contents must be zeroed after allocation.
+ * If the cache is an RCU-typesafe one, the object contents must never be
+ * zeroed after the first use. This is checked by memcmp() in
+ * do_kmem_cache_size().
+ */
+static bool __init check_buf(void *buf, int size, bool want_ctor,
+ bool want_rcu, bool want_zero)
+{
+ int bytes;
+ bool fail = false;
+
+ bytes = count_nonzero_bytes(buf, size);
+ WARN_ON(want_ctor && want_zero);
+ if (want_zero)
+ return bytes;
+ if (want_ctor) {
+ if (*(unsigned int *)buf != CTOR_PATTERN)
+ fail = 1;
+ } else {
+ if (bytes)
+ fail = !want_rcu;
+ }
+ return fail;
+}
+
+#define BULK_SIZE 100
+static void *bulk_array[BULK_SIZE];
+
+/*
+ * Test kmem_cache with given parameters:
+ * want_ctor - use a constructor;
+ * want_rcu - use SLAB_TYPESAFE_BY_RCU;
+ * want_zero - use __GFP_ZERO.
+ */
+static int __init do_kmem_cache_size(size_t size, bool want_ctor,
+ bool want_rcu, bool want_zero,
+ int *total_failures)
+{
+ struct kmem_cache *c;
+ int iter;
+ bool fail = false;
+ gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
+ void *buf, *buf_copy;
+
+ c = kmem_cache_create("test_cache", size, 1,
+ want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
+ want_ctor ? test_ctor : NULL);
+ for (iter = 0; iter < 10; iter++) {
+ /* Do a test of bulk allocations */
+ if (!want_rcu && !want_ctor) {
+ int ret;
+
+ ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
+ if (!ret) {
+ fail = true;
+ } else {
+ int i;
+ for (i = 0; i < ret; i++)
+ fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
+ kmem_cache_free_bulk(c, ret, bulk_array);
+ }
+ }
+
+ buf = kmem_cache_alloc(c, alloc_mask);
+ /* Check that buf is zeroed, if it must be. */
+ fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
+ fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
+
+ if (!want_rcu) {
+ kmem_cache_free(c, buf);
+ continue;
+ }
+
+ /*
+ * If this is an RCU cache, use a critical section to ensure we
+ * can touch objects after they're freed.
+ */
+ rcu_read_lock();
+ /*
+ * Copy the buffer to check that it's not wiped on
+ * free().
+ */
+ buf_copy = kmalloc(size, GFP_ATOMIC);
+ if (buf_copy)
+ memcpy(buf_copy, buf, size);
+
+ kmem_cache_free(c, buf);
+ /*
+ * Check that |buf| is intact after kmem_cache_free().
+ * |want_zero| is false, because we wrote garbage to
+ * the buffer already.
+ */
+ fail |= check_buf(buf, size, want_ctor, want_rcu,
+ false);
+ if (buf_copy) {
+ fail |= (bool)memcmp(buf, buf_copy, size);
+ kfree(buf_copy);
+ }
+ rcu_read_unlock();
+ }
+ kmem_cache_destroy(c);
+
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Check that the data written to an RCU-allocated object survives
+ * reallocation.
+ */
+static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ void *buf, *buf_contents, *saved_ptr;
+ void **used_objects;
+ int i, iter, maxiter = 1024;
+ bool fail = false;
+
+ c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ saved_ptr = buf;
+ fill_with_garbage(buf, size);
+ buf_contents = kmalloc(size, GFP_KERNEL);
+ if (!buf_contents) {
+ kmem_cache_free(c, buf);
+ goto out;
+ }
+ used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
+ if (!used_objects) {
+ kmem_cache_free(c, buf);
+ kfree(buf_contents);
+ goto out;
+ }
+ memcpy(buf_contents, buf, size);
+ kmem_cache_free(c, buf);
+ /*
+ * Run for a fixed number of iterations. If we never hit saved_ptr,
+ * assume the test passes.
+ */
+ for (iter = 0; iter < maxiter; iter++) {
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ used_objects[iter] = buf;
+ if (buf == saved_ptr) {
+ fail = memcmp(buf_contents, buf, size);
+ for (i = 0; i <= iter; i++)
+ kmem_cache_free(c, used_objects[i]);
+ goto free_out;
+ }
+ }
+
+ for (iter = 0; iter < maxiter; iter++)
+ kmem_cache_free(c, used_objects[iter]);
+
+free_out:
+ kfree(buf_contents);
+ kfree(used_objects);
+out:
+ kmem_cache_destroy(c);
+ *total_failures += fail;
+ return 1;
+}
+
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ int i, iter, maxiter = 1024;
+ int num, bytes;
+ bool fail = false;
+ void *objects[10];
+
+ c = kmem_cache_create("test_cache", size, size, 0, NULL);
+ for (iter = 0; (iter < maxiter) && !fail; iter++) {
+ num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+ objects);
+ for (i = 0; i < num; i++) {
+ bytes = count_nonzero_bytes(objects[i], size);
+ if (bytes)
+ fail = true;
+ fill_with_garbage(objects[i], size);
+ }
+
+ if (num)
+ kmem_cache_free_bulk(c, num, objects);
+ }
+ kmem_cache_destroy(c);
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Test kmem_cache allocation by creating caches of different sizes, with and
+ * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
+ */
+static int __init test_kmemcache(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, flags, size;
+ bool ctor, rcu, zero;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ for (flags = 0; flags < 8; flags++) {
+ ctor = flags & 1;
+ rcu = flags & 2;
+ zero = flags & 4;
+ if (ctor && zero)
+ continue;
+ num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
+ &failures);
+ }
+ num_tests += do_kmem_cache_size_bulk(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
+static int __init test_rcu_persistent(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ num_tests += do_kmem_cache_rcu_persistent(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/*
+ * Run the tests. Each test function returns the number of executed tests and
+ * updates |failures| with the number of failed tests.
+ */
+static int __init test_meminit_init(void)
+{
+ int failures = 0, num_tests = 0;
+
+ num_tests += test_pages(&failures);
+ num_tests += test_kvmalloc(&failures);
+ num_tests += test_kmemcache(&failures);
+ num_tests += test_rcu_persistent(&failures);
+
+ if (failures == 0)
+ pr_info("all %d tests passed!\n", num_tests);
+ else
+ pr_info("failures: %d out of %d\n", failures, num_tests);
+
+ return failures ? -EINVAL : 0;
+}
+module_init(test_meminit_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
new file mode 100644
index 000000000000..7b01b4387cfb
--- /dev/null
+++ b/lib/test_min_heap.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "min_heap_test: " fmt
+
+/*
+ * Test cases for the min heap, exercised in both min- and max-ordered configurations.
+ */
+
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
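+/*
+ * For reference only (assumed shape, see <linux/min_heap.h>): the tests
+ * below rely on roughly the following definitions:
+ *
+ *   struct min_heap { void *data; int nr; int size; };
+ *   struct min_heap_callbacks {
+ *           int elem_size;
+ *           bool (*less)(const void *lhs, const void *rhs);
+ *           void (*swp)(void *lhs, void *rhs);
+ *   };
+ *
+ * A "max heap" is obtained simply by passing a greater-than comparator as
+ * the ->less() callback, which is how the max-ordered cases are built here.
+ */
+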
+static __init bool less_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs < *(int *)rhs;
+}
+
+static __init bool greater_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs > *(int *)rhs;
+}
+
+static __init void swap_ints(void *lhs, void *rhs)
+{
+ int temp = *(int *)lhs;
+
+ *(int *)lhs = *(int *)rhs;
+ *(int *)rhs = temp;
+}
+
+static __init int pop_verify_heap(bool min_heap,
+ struct min_heap *heap,
+ const struct min_heap_callbacks *funcs)
+{
+ int *values = heap->data;
+ int err = 0;
+ int last;
+
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ while (heap->nr > 0) {
+ if (min_heap) {
+ if (last > values[0]) {
+ pr_err("error: expected %d <= %d\n", last,
+ values[0]);
+ err++;
+ }
+ } else {
+ if (last < values[0]) {
+ pr_err("error: expected %d >= %d\n", last,
+ values[0]);
+ err++;
+ }
+ }
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ }
+ return err;
+}
+
+static __init int test_heapify_all(bool min_heap)
+{
+ int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
+ -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
+ struct min_heap heap = {
+ .data = values,
+ .nr = ARRAY_SIZE(values),
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, err;
+
+ /* Test with known set of values. */
+ min_heapify_all(&heap, &funcs);
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ heap.nr = ARRAY_SIZE(values);
+ for (i = 0; i < heap.nr; i++)
+ values[i] = get_random_u32();
+
+ min_heapify_all(&heap, &funcs);
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ while (heap.nr < heap.size) {
+ temp = get_random_u32();
+ min_heap_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_pop_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Fill values with data to pop and replace. */
+ temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_pop_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ heap.nr = 0;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with randomly generated values. */
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ temp = get_random_u32();
+ min_heap_pop_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static int __init test_min_heap_init(void)
+{
+ int err = 0;
+
+ err += test_heapify_all(true);
+ err += test_heapify_all(false);
+ err += test_heap_push(true);
+ err += test_heap_push(false);
+ err += test_heap_pop_push(true);
+ err += test_heap_pop_push(false);
+ if (err) {
+ pr_err("test failed with %d errors\n", err);
+ return -EINVAL;
+ }
+ pr_info("test passed\n");
+ return 0;
+}
+module_init(test_min_heap_init);
+
+static void __exit test_min_heap_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_min_heap_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 72c1abfa154d..c0c957c50635 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -157,7 +157,7 @@ static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
int err;
if (should_create_root)
- prandom_bytes(world->next_root_buf,
+ get_random_bytes(world->next_root_buf,
sizeof(world->next_root_buf));
objagg_obj = world_obj_get(world, objagg, key_id);
@@ -979,10 +979,10 @@ err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
- objagg_hints_put(hints);
- objagg_destroy(objagg2);
i = hints_case->key_ids_count;
+ objagg_destroy(objagg2);
err_check_expect_hints_stats:
+ objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
new file mode 100644
index 000000000000..bfdb81599832
--- /dev/null
+++ b/lib/test_objpool.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module for lockless object pool
+ *
+ * Copyright: wuqiang.matt@bytedance.com
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/objpool.h>
+
+#define OT_NR_MAX_BULK (16)
+
+/* memory usage */
+struct ot_mem_stat {
+ atomic_long_t alloc;
+ atomic_long_t free;
+};
+
+/* object allocation results */
+struct ot_obj_stat {
+ unsigned long nhits;
+ unsigned long nmiss;
+};
+
+/* control & results per testcase */
+struct ot_data {
+ struct rw_semaphore start;
+ struct completion wait;
+ struct completion rcu;
+ atomic_t nthreads ____cacheline_aligned_in_smp;
+ atomic_t stop ____cacheline_aligned_in_smp;
+ struct ot_mem_stat kmalloc;
+ struct ot_mem_stat vmalloc;
+ struct ot_obj_stat objects;
+ u64 duration;
+};
+
+/* testcase */
+struct ot_test {
+ int async; /* synchronous or asynchronous */
+ int mode; /* only mode 0 supported */
+ int objsz; /* object size */
+ int duration; /* ms */
+ int delay; /* ms */
+ int bulk_normal;
+ int bulk_irq;
+ unsigned long hrtimer; /* ms */
+ const char *name;
+ struct ot_data data;
+};
+
+/* per-cpu worker */
+struct ot_item {
+ struct objpool_head *pool; /* pool head */
+ struct ot_test *test; /* test parameters */
+
+ void (*worker)(struct ot_item *item, int irq);
+
+ /* hrtimer control */
+ ktime_t hrtcycle;
+ struct hrtimer hrtimer;
+
+ int bulk[2]; /* for thread and irq */
+ int delay;
+ u32 niters;
+
+ /* summary per thread */
+ struct ot_obj_stat stat[2]; /* thread and irq */
+ u64 duration;
+};
+
+/*
+ * memory leak checking
+ */
+
+static void *ot_kzalloc(struct ot_test *test, long size)
+{
+ void *ptr = kzalloc(size, GFP_KERNEL);
+
+ if (ptr)
+ atomic_long_add(size, &test->data.kmalloc.alloc);
+ return ptr;
+}
+
+static void ot_kfree(struct ot_test *test, void *ptr, long size)
+{
+ if (!ptr)
+ return;
+ atomic_long_add(size, &test->data.kmalloc.free);
+ kfree(ptr);
+}
+
+static void ot_mem_report(struct ot_test *test)
+{
+ long alloc, free;
+
+ pr_info("memory allocation summary for %s\n", test->name);
+
+ alloc = atomic_long_read(&test->data.kmalloc.alloc);
+ free = atomic_long_read(&test->data.kmalloc.free);
+ pr_info(" kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
+
+ alloc = atomic_long_read(&test->data.vmalloc.alloc);
+ free = atomic_long_read(&test->data.vmalloc.free);
+ pr_info(" vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
+}
+
+/* user object instance */
+struct ot_node {
+ void *owner;
+ unsigned long data;
+ unsigned long refs;
+ unsigned long payload[32];
+};
+
+/* user objpool manager */
+struct ot_context {
+ struct objpool_head pool; /* objpool head */
+ struct ot_test *test; /* test parameters */
+ void *ptr; /* user pool buffer */
+ unsigned long size; /* buffer size */
+ struct rcu_head rcu;
+};
+
+static DEFINE_PER_CPU(struct ot_item, ot_pcup_items);
+
+static int ot_init_data(struct ot_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ init_rwsem(&data->start);
+ init_completion(&data->wait);
+ init_completion(&data->rcu);
+ atomic_set(&data->nthreads, 1);
+
+ return 0;
+}
+
+static int ot_init_node(void *nod, void *context)
+{
+ struct ot_context *sop = context;
+ struct ot_node *on = nod;
+
+ on->owner = &sop->pool;
+ return 0;
+}
+
+static enum hrtimer_restart ot_hrtimer_handler(struct hrtimer *hrt)
+{
+ struct ot_item *item = container_of(hrt, struct ot_item, hrtimer);
+ struct ot_test *test = item->test;
+
+ if (atomic_read_acquire(&test->data.stop))
+ return HRTIMER_NORESTART;
+
+ /* do bulk testing of object pop/push */
+ item->worker(item, 1);
+
+ hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle);
+ return HRTIMER_RESTART;
+}
+
+static void ot_start_hrtimer(struct ot_item *item)
+{
+ if (!item->test->hrtimer)
+ return;
+ hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL);
+}
+
+static void ot_stop_hrtimer(struct ot_item *item)
+{
+ if (!item->test->hrtimer)
+ return;
+ hrtimer_cancel(&item->hrtimer);
+}
+
+static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
+{
+ struct hrtimer *hrt = &item->hrtimer;
+
+ if (!hrtimer)
+ return -ENOENT;
+
+ item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
+ hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrt->function = ot_hrtimer_handler;
+ return 0;
+}
+
+static int ot_init_cpu_item(struct ot_item *item,
+ struct ot_test *test,
+ struct objpool_head *pool,
+ void (*worker)(struct ot_item *, int))
+{
+ memset(item, 0, sizeof(*item));
+ item->pool = pool;
+ item->test = test;
+ item->worker = worker;
+
+ item->bulk[0] = test->bulk_normal;
+ item->bulk[1] = test->bulk_irq;
+ item->delay = test->delay;
+
+ /* initialize hrtimer */
+ ot_init_hrtimer(item, item->test->hrtimer);
+ return 0;
+}
+
+static int ot_thread_worker(void *arg)
+{
+ struct ot_item *item = arg;
+ struct ot_test *test = item->test;
+ ktime_t start;
+
+ atomic_inc(&test->data.nthreads);
+ down_read(&test->data.start);
+ up_read(&test->data.start);
+ start = ktime_get();
+ ot_start_hrtimer(item);
+ do {
+ if (atomic_read_acquire(&test->data.stop))
+ break;
+ /* do bulk testing of object pop/push */
+ item->worker(item, 0);
+ } while (!kthread_should_stop());
+ ot_stop_hrtimer(item);
+ item->duration = (u64) ktime_us_delta(ktime_get(), start);
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ return 0;
+}
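+
+/*
+ * The down_read()/up_read() pair in ot_thread_worker() acts as a start
+ * barrier: ot_start_sync()/ot_start_async() hold data->start for write
+ * while spawning the per-cpu workers, so every worker blocks in
+ * down_read() until up_write() opens the start line and all threads
+ * begin working at roughly the same moment.
+ */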
+
+static void ot_perf_report(struct ot_test *test, u64 duration)
+{
+ struct ot_obj_stat total, normal = {0}, irq = {0};
+ int cpu, nthreads = 0;
+
+ pr_info("\n");
+ pr_info("Testing summary for %s\n", test->name);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ if (!item->duration)
+ continue;
+ normal.nhits += item->stat[0].nhits;
+ normal.nmiss += item->stat[0].nmiss;
+ irq.nhits += item->stat[1].nhits;
+ irq.nmiss += item->stat[1].nmiss;
+ pr_info("CPU: %d duration: %lluus\n", cpu, item->duration);
+ pr_info("\tthread:\t%16lu hits \t%16lu miss\n",
+ item->stat[0].nhits, item->stat[0].nmiss);
+ pr_info("\tirq: \t%16lu hits \t%16lu miss\n",
+ item->stat[1].nhits, item->stat[1].nmiss);
+ pr_info("\ttotal: \t%16lu hits \t%16lu miss\n",
+ item->stat[0].nhits + item->stat[1].nhits,
+ item->stat[0].nmiss + item->stat[1].nmiss);
+ nthreads++;
+ }
+
+ total.nhits = normal.nhits + irq.nhits;
+ total.nmiss = normal.nmiss + irq.nmiss;
+
+ pr_info("ALL: \tnthreads: %d duration: %lluus\n", nthreads, duration);
+ pr_info("SUM: \t%16lu hits \t%16lu miss\n",
+ total.nhits, total.nmiss);
+
+ test->data.objects = total;
+ test->data.duration = duration;
+}
+
+/*
+ * synchronous test cases for objpool manipulation
+ */
+
+/* objpool manipulation for synchronous mode (percpu objpool) */
+static struct ot_context *ot_init_sync_m0(struct ot_test *test)
+{
+ struct ot_context *sop = NULL;
+ int max = num_possible_cpus() << 3;
+ gfp_t gfp = GFP_KERNEL;
+
+ sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
+ if (!sop)
+ return NULL;
+ sop->test = test;
+ if (test->objsz < 512)
+ gfp = GFP_ATOMIC;
+
+ if (objpool_init(&sop->pool, max, test->objsz,
+ gfp, sop, ot_init_node, NULL)) {
+ ot_kfree(test, sop, sizeof(*sop));
+ return NULL;
+ }
+ WARN_ON(max != sop->pool.nr_objs);
+
+ return sop;
+}
+
+static void ot_fini_sync(struct ot_context *sop)
+{
+ objpool_fini(&sop->pool);
+ ot_kfree(sop->test, sop, sizeof(*sop));
+}
+
+static struct {
+ struct ot_context * (*init)(struct ot_test *oc);
+ void (*fini)(struct ot_context *sop);
+} g_ot_sync_ops[] = {
+ {.init = ot_init_sync_m0, .fini = ot_fini_sync},
+};
+
+/*
+ * synchronous test cases: performance mode
+ */
+
+static void ot_bulk_sync(struct ot_item *item, int irq)
+{
+ struct ot_node *nods[OT_NR_MAX_BULK];
+ int i;
+
+ for (i = 0; i < item->bulk[irq]; i++)
+ nods[i] = objpool_pop(item->pool);
+
+ if (!irq && (item->delay || !(++(item->niters) & 0x7FFF)))
+ msleep(item->delay);
+
+ while (i-- > 0) {
+ struct ot_node *on = nods[i];
+ if (on) {
+ on->refs++;
+ objpool_push(on, item->pool);
+ item->stat[irq].nhits++;
+ } else {
+ item->stat[irq].nmiss++;
+ }
+ }
+}
+
+static int ot_start_sync(struct ot_test *test)
+{
+ struct ot_context *sop;
+ ktime_t start;
+ u64 duration;
+ unsigned long timeout;
+ int cpu;
+
+ /* initialize objpool for the synchronous testcase */
+ sop = g_ot_sync_ops[test->mode].init(test);
+ if (!sop)
+ return -ENOMEM;
+
+ /* grab rwsem to block testing threads */
+ down_write(&test->data.start);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ struct task_struct *work;
+
+ ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync);
+
+ /* skip offline cpus */
+ if (!cpu_online(cpu))
+ continue;
+
+ work = kthread_create_on_node(ot_thread_worker, item,
+ cpu_to_node(cpu), "ot_worker_%d", cpu);
+ if (IS_ERR(work)) {
+ pr_err("failed to create thread for cpu %d\n", cpu);
+ } else {
+ kthread_bind(work, cpu);
+ wake_up_process(work);
+ }
+ }
+
+ /* wait a while to make sure all threads are waiting at the start line */
+ msleep(20);
+
+ /* in case no threads were created (insufficient memory?) */
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ // sched_set_fifo_low(current);
+
+ /* start objpool testing threads */
+ start = ktime_get();
+ up_write(&test->data.start);
+
+ /* yield the CPU to the worker threads for the test duration (ms) */
+ timeout = msecs_to_jiffies(test->duration);
+ schedule_timeout_interruptible(timeout);
+
+ /* tell worker threads to quit */
+ atomic_set_release(&test->data.stop, 1);
+
+ /* wait for all worker threads to finish and quit */
+ wait_for_completion(&test->data.wait);
+ duration = (u64) ktime_us_delta(ktime_get(), start);
+
+ /* cleanup objpool */
+ g_ot_sync_ops[test->mode].fini(sop);
+
+ /* report testing summary and performance results */
+ ot_perf_report(test, duration);
+
+ /* report memory allocation summary */
+ ot_mem_report(test);
+
+ return 0;
+}
+
+/*
+ * asynchronous test cases: pool lifecycle controlled by refcount
+ */
+
+static void ot_fini_async_rcu(struct rcu_head *rcu)
+{
+ struct ot_context *sop = container_of(rcu, struct ot_context, rcu);
+ struct ot_test *test = sop->test;
+
+ /* here all cpus are aware of the stop event: test->data.stop = 1 */
+ WARN_ON(!atomic_read_acquire(&test->data.stop));
+
+ objpool_fini(&sop->pool);
+ complete(&test->data.rcu);
+}
+
+static void ot_fini_async(struct ot_context *sop)
+{
+ /* make sure the stop event is acknowledged by all cores */
+ call_rcu(&sop->rcu, ot_fini_async_rcu);
+}
+
+static int ot_objpool_release(struct objpool_head *head, void *context)
+{
+ struct ot_context *sop = context;
+
+ WARN_ON(!head || !sop || head != &sop->pool);
+
+ /* do context cleanup if needed */
+ if (sop)
+ ot_kfree(sop->test, sop, sizeof(*sop));
+
+ return 0;
+}
+
+static struct ot_context *ot_init_async_m0(struct ot_test *test)
+{
+ struct ot_context *sop = NULL;
+ int max = num_possible_cpus() << 3;
+ gfp_t gfp = GFP_KERNEL;
+
+ sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
+ if (!sop)
+ return NULL;
+ sop->test = test;
+ if (test->objsz < 512)
+ gfp = GFP_ATOMIC;
+
+ if (objpool_init(&sop->pool, max, test->objsz, gfp, sop,
+ ot_init_node, ot_objpool_release)) {
+ ot_kfree(test, sop, sizeof(*sop));
+ return NULL;
+ }
+ WARN_ON(max != sop->pool.nr_objs);
+
+ return sop;
+}
+
+static struct {
+ struct ot_context * (*init)(struct ot_test *oc);
+ void (*fini)(struct ot_context *sop);
+} g_ot_async_ops[] = {
+ {.init = ot_init_async_m0, .fini = ot_fini_async},
+};
+
+static void ot_nod_recycle(struct ot_node *on, struct objpool_head *pool,
+ int release)
+{
+ struct ot_context *sop;
+
+ on->refs++;
+
+ if (!release) {
+ /* push the object back to the objpool for reuse */
+ objpool_push(on, pool);
+ return;
+ }
+
+ sop = container_of(pool, struct ot_context, pool);
+ WARN_ON(sop != pool->context);
+
+ /* drop the object for good and unref the objpool */
+ objpool_drop(on, pool);
+}
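+
+/*
+ * In async mode the pool's lifetime is controlled by its reference count
+ * (see the section comment above): while the test runs, ot_nod_recycle()
+ * pushes objects back for reuse; once the stop flag has been observed it
+ * drops them via objpool_drop() instead.  When the last reference goes
+ * away (all objects dropped and objpool_fini() called from the RCU
+ * callback), the objpool core is expected to invoke ot_objpool_release()
+ * to free the ot_context.
+ */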
+
+static void ot_bulk_async(struct ot_item *item, int irq)
+{
+ struct ot_test *test = item->test;
+ struct ot_node *nods[OT_NR_MAX_BULK];
+ int i, stop;
+
+ for (i = 0; i < item->bulk[irq]; i++)
+ nods[i] = objpool_pop(item->pool);
+
+ if (!irq) {
+ if (item->delay || !(++(item->niters) & 0x7FFF))
+ msleep(item->delay);
+ get_cpu();
+ }
+
+ stop = atomic_read_acquire(&test->data.stop);
+
+ /* drop all objects and deref objpool */
+ while (i-- > 0) {
+ struct ot_node *on = nods[i];
+
+ if (on) {
+ on->refs++;
+ ot_nod_recycle(on, item->pool, stop);
+ item->stat[irq].nhits++;
+ } else {
+ item->stat[irq].nmiss++;
+ }
+ }
+
+ if (!irq)
+ put_cpu();
+}
+
+static int ot_start_async(struct ot_test *test)
+{
+ struct ot_context *sop;
+ ktime_t start;
+ u64 duration;
+ unsigned long timeout;
+ int cpu;
+
+ /* initialize objpool for the asynchronous testcase */
+ sop = g_ot_async_ops[test->mode].init(test);
+ if (!sop)
+ return -ENOMEM;
+
+ /* grab rwsem to block testing threads */
+ down_write(&test->data.start);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ struct task_struct *work;
+
+ ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async);
+
+ /* skip offline cpus */
+ if (!cpu_online(cpu))
+ continue;
+
+ work = kthread_create_on_node(ot_thread_worker, item,
+ cpu_to_node(cpu), "ot_worker_%d", cpu);
+ if (IS_ERR(work)) {
+ pr_err("failed to create thread for cpu %d\n", cpu);
+ } else {
+ kthread_bind(work, cpu);
+ wake_up_process(work);
+ }
+ }
+
+ /* wait a while to make sure all threads are waiting at the start line */
+ msleep(20);
+
+ /* in case no threads were created (insufficient memory?) */
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ /* start objpool testing threads */
+ start = ktime_get();
+ up_write(&test->data.start);
+
+ /* yield the CPU to the worker threads for the test duration (ms) */
+ timeout = msecs_to_jiffies(test->duration);
+ schedule_timeout_interruptible(timeout);
+
+ /* tell worker threads to quit */
+ atomic_set_release(&test->data.stop, 1);
+
+ /* do async-finalization */
+ g_ot_async_ops[test->mode].fini(sop);
+
+ /* wait for all worker threads to finish and quit */
+ wait_for_completion(&test->data.wait);
+ duration = (u64) ktime_us_delta(ktime_get(), start);
+
+ /* make sure the RCU callback has been invoked */
+ wait_for_completion(&test->data.rcu);
+
+ /*
+ * now we are sure the objpool has been finalized, either
+ * by the RCU callback or by the worker threads
+ */
+
+ /* report testing summary and performance results */
+ ot_perf_report(test, duration);
+
+ /* report memory allocation summary */
+ ot_mem_report(test);
+
+ return 0;
+}
+
+/*
+ * predefined testing cases:
+ * synchronous case / overrun case / async case
+ *
+ * async: synchronous or asynchronous testing
+ * mode: only mode 0 supported
+ * objsz: object size
+ * duration: int, total test time in ms
+ * delay: int, delay (in ms) between each iteration
+ * bulk_normal: int, bulk pop/push count per iteration for the thread worker
+ * bulk_irq: int, bulk pop/push count per iteration for the irq (hrtimer) consumer
+ * hrtimer: unsigned long, hrtimer interval in ms
+ * name: char *, tag for the current test case
+ */
+
+#define NODE_COMPACT sizeof(struct ot_node)
+#define NODE_VMALLOC (512)
+
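+/*
+ * For illustration: the entries below use positional initializers; the
+ * first one is equivalent to
+ *
+ *   { .async = 0, .mode = 0, .objsz = NODE_COMPACT, .duration = 1000,
+ *     .delay = 0, .bulk_normal = 1, .bulk_irq = 0, .hrtimer = 0,
+ *     .name = "sync: percpu objpool" }
+ */
+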
+static struct ot_test g_testcases[] = {
+
+ /* sync & normal */
+ {0, 0, NODE_COMPACT, 1000, 0, 1, 0, 0, "sync: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 1, 0, 0, "sync: percpu objpool from vmalloc"},
+
+ /* sync & hrtimer */
+ {0, 0, NODE_COMPACT, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool from vmalloc"},
+
+ /* sync & overrun */
+ {0, 0, NODE_COMPACT, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool from vmalloc"},
+
+ /* async mode */
+ {1, 0, NODE_COMPACT, 1000, 100, 1, 0, 0, "async: percpu objpool"},
+ {1, 0, NODE_VMALLOC, 1000, 100, 1, 0, 0, "async: percpu objpool from vmalloc"},
+
+ /* async + hrtimer mode */
+ {1, 0, NODE_COMPACT, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool"},
+ {1, 0, NODE_VMALLOC, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool from vmalloc"},
+};
+
+static int __init ot_mod_init(void)
+{
+ int i;
+
+ /* perform testings */
+ for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
+ ot_init_data(&g_testcases[i].data);
+ if (g_testcases[i].async)
+ ot_start_async(&g_testcases[i]);
+ else
+ ot_start_sync(&g_testcases[i]);
+ }
+
+ /* show tests summary */
+ pr_info("\n");
+ pr_info("Summary of testcases:\n");
+ for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
+ pr_info(" duration: %lluus \thits: %10lu \tmiss: %10lu \t%s\n",
+ g_testcases[i].data.duration, g_testcases[i].data.objects.nhits,
+ g_testcases[i].data.objects.nmiss, g_testcases[i].name);
+ }
+
+ return -EAGAIN;
+}
+
+static void __exit ot_mod_exit(void)
+{
+}
+
+module_init(ot_mod_init);
+module_exit(ot_mod_exit);
+
+MODULE_LICENSE("GPL");
\ No newline at end of file
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
deleted file mode 100644
index 7a4b6f6c5473..000000000000
--- a/lib/test_overflow.c
+++ /dev/null
@@ -1,614 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Test cases for arithmetic overflow checks.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/overflow.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#define DEFINE_TEST_ARRAY(t) \
- static const struct test_ ## t { \
- t a, b; \
- t sum, diff, prod; \
- bool s_of, d_of, p_of; \
- } t ## _tests[] __initconst
-
-DEFINE_TEST_ARRAY(u8) = {
- {0, 0, 0, 0, 0, false, false, false},
- {1, 1, 2, 0, 1, false, false, false},
- {0, 1, 1, U8_MAX, 0, false, true, false},
- {1, 0, 1, 1, 0, false, false, false},
- {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
- {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
- {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
- {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
- {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
-
- {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
- {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
-
- {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
- {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
- {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
- {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
-
- {48, 32, 80, 16, 0, false, false, true},
- {128, 128, 0, 0, 0, true, false, true},
- {123, 234, 101, 145, 110, true, true, true},
-};
-DEFINE_TEST_ARRAY(u16) = {
- {0, 0, 0, 0, 0, false, false, false},
- {1, 1, 2, 0, 1, false, false, false},
- {0, 1, 1, U16_MAX, 0, false, true, false},
- {1, 0, 1, 1, 0, false, false, false},
- {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
- {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
- {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
- {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
- {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
-
- {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
- {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
-
- {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
- {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
- {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
- {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
-
- {123, 234, 357, 65425, 28782, false, true, false},
- {1234, 2345, 3579, 64425, 10146, false, true, true},
-};
-DEFINE_TEST_ARRAY(u32) = {
- {0, 0, 0, 0, 0, false, false, false},
- {1, 1, 2, 0, 1, false, false, false},
- {0, 1, 1, U32_MAX, 0, false, true, false},
- {1, 0, 1, 1, 0, false, false, false},
- {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
- {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
- {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
- {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
- {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
-
- {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
- {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
-
- {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
- {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
- {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
- {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
-
- {-2U, 1U, -1U, -3U, -2U, false, false, false},
- {-4U, 5U, 1U, -9U, -20U, true, false, true},
-};
-
-DEFINE_TEST_ARRAY(u64) = {
- {0, 0, 0, 0, 0, false, false, false},
- {1, 1, 2, 0, 1, false, false, false},
- {0, 1, 1, U64_MAX, 0, false, true, false},
- {1, 0, 1, 1, 0, false, false, false},
- {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
- {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
- {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
- {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
- {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
-
- {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
- {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
-
- {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
- {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
- {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
- {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
- {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
- 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
- false, true, false},
- {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
-};
-
-DEFINE_TEST_ARRAY(s8) = {
- {0, 0, 0, 0, 0, false, false, false},
-
- {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
- {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
- {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
- {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
-
- {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
- {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
- {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
- {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
- {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
- {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
-
- {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
- {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
- {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
- {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
-
- {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
- {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
-
- {-4, -32, -36, 28, -128, false, false, true},
- {-4, 32, 28, -36, -128, false, false, false},
-};
-
-DEFINE_TEST_ARRAY(s16) = {
- {0, 0, 0, 0, 0, false, false, false},
-
- {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
- {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
- {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
- {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
-
- {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
- {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
- {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
- {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
- {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
- {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
-
- {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
- {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
- {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
- {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
-
- {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
- {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
-};
-DEFINE_TEST_ARRAY(s32) = {
- {0, 0, 0, 0, 0, false, false, false},
-
- {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
- {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
- {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
- {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
-
- {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
- {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
- {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
- {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
- {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
- {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
-
- {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
- {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
- {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
- {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
-
- {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
- {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
-};
-DEFINE_TEST_ARRAY(s64) = {
- {0, 0, 0, 0, 0, false, false, false},
-
- {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
- {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
- {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
- {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
-
- {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
- {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
- {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
- {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
- {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
- {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
-
- {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
- {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
- {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
- {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
-
- {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
- {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
-
- {-1, -1, -2, 0, 1, false, false, false},
- {-1, -128, -129, 127, 128, false, false, false},
- {-128, -1, -129, -127, 128, false, false, false},
- {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
-};
-
-#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
- \
- _of = check_ ## op ## _overflow(a, b, &_r); \
- if (_of != of) { \
- pr_warn("expected "fmt" "sym" "fmt \
- " to%s overflow (type %s)\n", \
- a, b, of ? "" : " not", #t); \
- err = 1; \
- } \
- if (_r != r) { \
- pr_warn("expected "fmt" "sym" "fmt" == " \
- fmt", got "fmt" (type %s)\n", \
- a, b, r, _r, #t); \
- err = 1; \
- } \
-} while (0)
-
-#define DEFINE_TEST_FUNC(t, fmt) \
-static int __init do_test_ ## t(const struct test_ ## t *p) \
-{ \
- int err = 0; \
- \
- check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
- check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
- check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
- check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
- check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
- \
- return err; \
-} \
- \
-static int __init test_ ## t ## _overflow(void) { \
- int err = 0; \
- unsigned i; \
- \
- pr_info("%-3s: %zu arithmetic tests\n", #t, \
- ARRAY_SIZE(t ## _tests)); \
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
- err |= do_test_ ## t(&t ## _tests[i]); \
- return err; \
-}
-
-DEFINE_TEST_FUNC(u8, "%d");
-DEFINE_TEST_FUNC(s8, "%d");
-DEFINE_TEST_FUNC(u16, "%d");
-DEFINE_TEST_FUNC(s16, "%d");
-DEFINE_TEST_FUNC(u32, "%u");
-DEFINE_TEST_FUNC(s32, "%d");
-#if BITS_PER_LONG == 64
-DEFINE_TEST_FUNC(u64, "%llu");
-DEFINE_TEST_FUNC(s64, "%lld");
-#endif
-
-static int __init test_overflow_calculation(void)
-{
- int err = 0;
-
- err |= test_u8_overflow();
- err |= test_s8_overflow();
- err |= test_u16_overflow();
- err |= test_s16_overflow();
- err |= test_u32_overflow();
- err |= test_s32_overflow();
-#if BITS_PER_LONG == 64
- err |= test_u64_overflow();
- err |= test_s64_overflow();
-#endif
-
- return err;
-}
-
-static int __init test_overflow_shift(void)
-{
- int err = 0;
-
-/* Args are: value, shift, type, expected result, overflow expected */
-#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
- int __failed = 0; \
- typeof(a) __a = (a); \
- typeof(s) __s = (s); \
- t __e = (expect); \
- t __d; \
- bool __of = check_shl_overflow(__a, __s, &__d); \
- if (__of != of) { \
- pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
- #t, #a, #s, of ? "" : " not"); \
- __failed = 1; \
- } else if (!__of && __d != __e) { \
- pr_warn("expected (%s)(%s << %s) == %s\n", \
- #t, #a, #s, #expect); \
- if ((t)-1 < 0) \
- pr_warn("got %lld\n", (s64)__d); \
- else \
- pr_warn("got %llu\n", (u64)__d); \
- __failed = 1; \
- } \
- if (!__failed) \
- pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
- of ? "overflow" : #expect); \
- __failed; \
-})
-
- /* Sane shifts. */
- err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
- err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
- err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
- err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
- err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
- err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
- err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
- err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
- err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
- err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
- err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
- err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
- err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
- err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
- err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
- err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
- err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
- err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
- err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
- err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
- err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
- err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
- 0xFFFFFFFFULL << 32, false);
-
- /* Sane shift: start and end with 0, without a too-wide shift. */
- err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
- err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
- err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
-
- /* Sane shift: start and end with 0, without reaching signed bit. */
- err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
- err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
- err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
- err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
- err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
-
- /* Overflow: shifted the bit off the end. */
- err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
- err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
- err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
- err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
-
- /* Overflow: shifted into the signed bit. */
- err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
- err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
- err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
- err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
- err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
-
- /* Overflow: high bit falls off unsigned types. */
- /* 10010110 */
- err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
- /* 1000100010010110 */
- err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
- /* 10000100000010001000100010010110 */
- err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
- err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
- /* 1000001000010000010000000100000010000100000010001000100010010110 */
- err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
-
- /* Overflow: bit shifted into signed bit on signed types. */
- /* 01001011 */
- err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
- /* 0100010001001011 */
- err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
- /* 01000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
- err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
- /* 0100000100001000001000000010000001000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
-
- /* Overflow: bit shifted past signed bit on signed types. */
- /* 01001011 */
- err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
- /* 0100010001001011 */
- err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
- /* 01000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
- err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
- /* 0100000100001000001000000010000001000010000001000100010001001011 */
- err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
-
- /* Overflow: values larger than destination type. */
- err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
- err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
- err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
- err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
- err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
- err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
-
- /* Nonsense: negative initial value. */
- err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
- err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
- err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
- err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
- err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
- err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
- err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
- err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
- err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
-
- /* Nonsense: negative shift values. */
- err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
- err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
- err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
- err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
-
- /* Overflow: shifted at or beyond entire type's bit width. */
- err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
- err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
- err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
- err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
- err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
- err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
- err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
- err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
- err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
-
- /*
- * Corner case: for unsigned types, we fail when we've shifted
- * through the entire width of bits. For signed types, we might
- * want to match this behavior, but that would mean noticing if
- * we shift through all but the signed bit, and this is not
- * currently detected (but we'll notice an overflow into the
- * signed bit). So, for now, we will test this condition but
- * mark it as not expected to overflow.
- */
- err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
- err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
- err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
- err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
-
- return err;
-}
-
-/*
- * Deal with the various forms of allocator arguments. See comments above
- * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
- */
-#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
-#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
-#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
-#define alloc000(alloc, arg, sz) alloc(sz)
-#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
-#define free0(free, arg, ptr) free(ptr)
-#define free1(free, arg, ptr) free(arg, ptr)
-
-/* Wrap around to 16K */
-#define TEST_SIZE (5 * 4096)
-
-#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
-static int __init test_ ## func (void *arg) \
-{ \
- volatile size_t a = TEST_SIZE; \
- volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
- void *ptr; \
- \
- /* Tiny allocation test. */ \
- ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
- if (!ptr) { \
- pr_warn(#func " failed regular allocation?!\n"); \
- return 1; \
- } \
- free ## want_arg (free_func, arg, ptr); \
- \
- /* Wrapped allocation test. */ \
- ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
- a * b); \
- if (!ptr) { \
- pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \
- return 1; \
- } \
- free ## want_arg (free_func, arg, ptr); \
- \
- /* Saturated allocation test. */ \
- ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
- array_size(a, b)); \
- if (ptr) { \
- pr_warn(#func " missed saturation!\n"); \
- free ## want_arg (free_func, arg, ptr); \
- return 1; \
- } \
- pr_info(#func " detected saturation\n"); \
- return 0; \
-}
-
-/*
- * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
- * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
- * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
- * | | |
- */
-DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
-DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
-DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
-DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
-DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0);
-DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1);
-DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0);
-DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1);
-DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
-DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
-DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
-DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
-DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
-DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
-
-static int __init test_overflow_allocation(void)
-{
- const char device_name[] = "overflow-test";
- struct device *dev;
- int err = 0;
-
- /* Create dummy device for devm_kmalloc()-family tests. */
- dev = root_device_register(device_name);
- if (IS_ERR(dev)) {
- pr_warn("Cannot register test device\n");
- return 1;
- }
-
- err |= test_kmalloc(NULL);
- err |= test_kmalloc_node(NULL);
- err |= test_kzalloc(NULL);
- err |= test_kzalloc_node(NULL);
- err |= test_kvmalloc(NULL);
- err |= test_kvmalloc_node(NULL);
- err |= test_kvzalloc(NULL);
- err |= test_kvzalloc_node(NULL);
- err |= test_vmalloc(NULL);
- err |= test_vmalloc_node(NULL);
- err |= test_vzalloc(NULL);
- err |= test_vzalloc_node(NULL);
- err |= test_devm_kmalloc(dev);
- err |= test_devm_kzalloc(dev);
-
- device_unregister(dev);
-
- return err;
-}
-
-static int __init test_module_init(void)
-{
- int err = 0;
-
- err |= test_overflow_calculation();
- err |= test_overflow_shift();
- err |= test_overflow_allocation();
-
- if (err) {
- pr_warn("FAIL!\n");
- err = -EINVAL;
- } else {
- pr_info("all tests passed\n");
- }
-
- return err;
-}
-
-static void __exit test_module_exit(void)
-{ }
-
-module_init(test_module_init);
-module_exit(test_module_exit);
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 944eb50f3862..69b6a5e177f2 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -12,6 +12,7 @@
#include <linux/random.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/sprintf.h>
#include <linux/string.h>
#include <linux/bitmap.h>
@@ -22,14 +23,22 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#include <linux/property.h>
+
#include "../tools/testing/selftests/kselftest_module.h"
#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+#define NOWARN(option, comment, block) \
+ __diag_push(); \
+ __diag_ignore_all(#option, comment); \
+ block \
+ __diag_pop();
+
+KSTM_MODULE_GLOBALS();
+
static char *test_buffer __initdata;
static char *alloced_buffer __initdata;
@@ -74,12 +83,17 @@ do_test(int bufsize, const char *expect, int elen,
return 1;
}
- if (memchr_inv(test_buffer + written + 1, FILL_CHAR, BUF_SIZE + PAD_SIZE - (written + 1))) {
+ if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
bufsize, fmt);
return 1;
}
+ if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
+ return 1;
+ }
+
if (memcmp(test_buffer, expect, written)) {
pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
bufsize, fmt, test_buffer, written, expect);
@@ -111,7 +125,7 @@ __test(const char *expect, int elen, const char *fmt, ...)
* be able to print it as expected.
*/
failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
- rand = 1 + prandom_u32_max(elen+1);
+ rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
failed_tests += do_test(rand, expect, elen, fmt, ap);
failed_tests += do_test(0, expect, elen, fmt, ap);
@@ -150,9 +164,11 @@ test_number(void)
test("0x1234abcd ", "%#-12x", 0x1234abcd);
test(" 0x1234abcd", "%#12x", 0x1234abcd);
test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234);
- test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
- test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
- test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", {
+ test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
+ test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
+ test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ })
/*
* POSIX/C99: »The result of converting zero with an explicit
* precision of zero shall be no characters.« Hence the output
@@ -162,18 +178,6 @@ test_number(void)
* behaviour.
*/
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
-#ifndef __CHAR_UNSIGNED__
- {
- /*
- * Passing a 'char' to a %02x specifier doesn't do
- * what was presumably the intention when char is
- * signed and the value is negative. One must either &
- * with 0xff or cast to u8.
- */
- char val = -16;
- test("0xfffffff0|0xf0|0xf0", "%#02x|%#02x|%#02x", val, val & 0xff, (u8)val);
- }
-#endif
}
static void __init
@@ -212,6 +216,7 @@ test_string(void)
#define PTR_STR "ffff0123456789ab"
#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
+#define ONES "ffffffff" /* hex 32 one bits */
static int __init
plain_format(void)
@@ -243,6 +248,7 @@ plain_format(void)
#define PTR_STR "456789ab"
#define PTR_VAL_NO_CRNG "(ptrval)"
#define ZEROS ""
+#define ONES ""
static int __init
plain_format(void)
@@ -297,6 +303,12 @@ plain(void)
{
int err;
+ if (no_hash_pointers) {
+ pr_warn("skipping plain 'p' tests");
+ skipped_tests += 2;
+ return;
+ }
+
err = plain_hash();
if (err) {
pr_warn("plain 'p' does not appear to be hashed\n");
@@ -328,14 +340,28 @@ test_hashed(const char *fmt, const void *p)
test(buf, fmt, p);
}
+/*
+ * NULL pointers aren't hashed.
+ */
static void __init
null_pointer(void)
{
- test_hashed("%p", NULL);
+ test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
test("(null)", "%pE", NULL);
}
+/*
+ * Error pointers aren't hashed.
+ */
+static void __init
+error_pointer(void)
+{
+ test(ONES "fffffff5", "%p", ERR_PTR(-11));
+ test(ONES "fffffff5", "%px", ERR_PTR(-11));
+ test("(efault)", "%pE", ERR_PTR(-11));
+}
+
#define PTR_INVALID ((void *)0x000000ab)
static void __init
@@ -455,6 +481,11 @@ dentry(void)
test("foo", "%pd", &test_dentry[0]);
test("foo", "%pd2", &test_dentry[0]);
+ test("(null)", "%pd", NULL);
+ test("(efault)", "%pd", PTR_INVALID);
+ test("(null)", "%pD", NULL);
+ test("(efault)", "%pD", PTR_INVALID);
+
test("romeo", "%pd", &test_dentry[3]);
test("alfa/romeo", "%pd2", &test_dentry[3]);
test("bravo/alfa/romeo", "%pd3", &test_dentry[3]);
@@ -471,7 +502,7 @@ struct_va_format(void)
}
static void __init
-struct_rtc_time(void)
+time_and_date(void)
{
/* 1543210543 */
const struct rtc_time tm = {
@@ -482,14 +513,26 @@ struct_rtc_time(void)
.tm_mon = 10,
.tm_year = 118,
};
+ /* 2019-01-04T15:32:23 */
+ time64_t t = 1546615943;
- test("(%ptR?)", "%pt", &tm);
+ test("(%pt?)", "%pt", &tm);
test("2018-11-26T05:35:43", "%ptR", &tm);
test("0118-10-26T05:35:43", "%ptRr", &tm);
test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
+
+ test("2019-01-04T15:32:23", "%ptT", &t);
+ test("0119-00-04T15:32:23", "%ptTr", &t);
+ test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
+
+ test("2019-01-04 15:32:23", "%ptTs", &t);
+ test("0119-00-04 15:32:23", "%ptTsr", &t);
+ test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
static void __init
@@ -539,28 +582,104 @@ netdev_features(void)
{
}
+struct page_flags_test {
+ int width;
+ int shift;
+ int mask;
+ const char *fmt;
+ const char *name;
+};
+
+static const struct page_flags_test pft[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ "%d", "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ "%d", "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ "%d", "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ "%#x", "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ "%#x", "kasantag"},
+};
+
+static void __init
+page_flags_test(int section, int node, int zone, int last_cpupid,
+ int kasan_tag, unsigned long flags, const char *name,
+ char *cmp_buf)
+{
+ unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
+ unsigned long size;
+ bool append = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(values); i++)
+ flags |= (values[i] & pft[i].mask) << pft[i].shift;
+
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
+ if (flags & PAGEFLAGS_MASK) {
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+ append = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pft); i++) {
+ if (!pft[i].width)
+ continue;
+
+ if (append)
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
+
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
+ pft[i].name);
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+ values[i] & pft[i].mask);
+ append = true;
+ }
+
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+
+ test(cmp_buf, "%pGp", &flags);
+}
+
+static void __init page_type_test(unsigned int page_type, const char *name,
+ char *cmp_buf)
+{
+ unsigned long size;
+
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
+ if (page_type_has_type(page_type))
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+ test(cmp_buf, "%pGt", &page_type);
+}
+
static void __init
flags(void)
{
unsigned long flags;
- gfp_t gfp;
char *cmp_buffer;
+ gfp_t gfp;
+ unsigned int page_type;
+
+ cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!cmp_buffer)
+ return;
flags = 0;
- test("", "%pGp", &flags);
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
- /* Page flags should filter the zone id */
flags = 1UL << NR_PAGEFLAGS;
- test("", "%pGp", &flags);
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
| 1UL << PG_active | 1UL << PG_swapbacked;
- test("uptodate|dirty|lru|active|swapbacked", "%pGp", &flags);
-
+ page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ "uptodate|dirty|lru|active|swapbacked",
+ cmp_buffer);
- flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC
- | VM_DENYWRITE;
- test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags);
+ flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags);
gfp = GFP_TRANSHUGE;
test("GFP_TRANSHUGE", "%pGg", &gfp);
@@ -568,31 +687,104 @@ flags(void)
gfp = GFP_ATOMIC|__GFP_DMA;
test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp);
- gfp = __GFP_ATOMIC;
- test("__GFP_ATOMIC", "%pGg", &gfp);
-
- cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
- if (!cmp_buffer)
- return;
+ gfp = __GFP_HIGH;
+ test("__GFP_HIGH", "%pGg", &gfp);
/* Any flags not translated by the table should remain numeric */
gfp = ~__GFP_BITS_MASK;
snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp);
test(cmp_buffer, "%pGg", &gfp);
- snprintf(cmp_buffer, BUF_SIZE, "__GFP_ATOMIC|%#lx",
+ snprintf(cmp_buffer, BUF_SIZE, "__GFP_HIGH|%#lx",
(unsigned long) gfp);
- gfp |= __GFP_ATOMIC;
+ gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
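+	/* Page types are stored inverted; e.g. ~PG_buddy is a page with the buddy type set. */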
+ page_type = ~0;
+ page_type_test(page_type, "", cmp_buffer);
+
+ page_type = 10;
+ page_type_test(page_type, "", cmp_buffer);
+
+ page_type = ~PG_buddy;
+ page_type_test(page_type, "buddy", cmp_buffer);
+
+ page_type = ~(PG_table | PG_buddy);
+ page_type_test(page_type, "table|buddy", cmp_buffer);
+
kfree(cmp_buffer);
}
+static void __init fwnode_pointer(void)
+{
+ const struct software_node first = { .name = "first" };
+ const struct software_node second = { .name = "second", .parent = &first };
+ const struct software_node third = { .name = "third", .parent = &second };
+ const struct software_node *group[] = { &first, &second, &third, NULL };
+ const char * const full_name_second = "first/second";
+ const char * const full_name_third = "first/second/third";
+ const char * const second_name = "second";
+ const char * const third_name = "third";
+ int rval;
+
+ rval = software_node_register_node_group(group);
+ if (rval) {
+ pr_warn("cannot register softnodes; rval %d\n", rval);
+ return;
+ }
+
+ test(full_name_second, "%pfw", software_node_fwnode(&second));
+ test(full_name_third, "%pfw", software_node_fwnode(&third));
+ test(full_name_third, "%pfwf", software_node_fwnode(&third));
+ test(second_name, "%pfwP", software_node_fwnode(&second));
+ test(third_name, "%pfwP", software_node_fwnode(&third));
+
+ software_node_unregister_node_group(group);
+}
+
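+/*
+ * '%p4cc' prints a FourCC code as its four characters, the endianness
+ * (bit 31 selects the big-endian form), and the raw hex value.
+ */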
+static void __init fourcc_pointer(void)
+{
+ struct {
+ u32 code;
+ char *str;
+ } const try[] = {
+ { 0x3231564e, "NV12 little-endian (0x3231564e)", },
+ { 0xb231564e, "NV12 big-endian (0xb231564e)", },
+ { 0x10111213, ".... little-endian (0x10111213)", },
+ { 0x20303159, "Y10 little-endian (0x20303159)", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(try); i++)
+ test(try[i].str, "%p4cc", &try[i].code);
+}
+
+static void __init
+errptr(void)
+{
+ test("-1234", "%pe", ERR_PTR(-1234));
+
+ /* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
+ BUILD_BUG_ON(IS_ERR(PTR));
+ test_hashed("%pe", PTR);
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+ test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EAGAIN));
+ BUILD_BUG_ON(EAGAIN != EWOULDBLOCK);
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EWOULDBLOCK));
+ test("[-EIO ]", "[%-8pe]", ERR_PTR(-EIO));
+ test("[ -EIO]", "[%8pe]", ERR_PTR(-EIO));
+ test("-EPROBE_DEFER", "%pe", ERR_PTR(-EPROBE_DEFER));
+#endif
+}
+
static void __init
test_pointer(void)
{
plain();
null_pointer();
+ error_pointer();
invalid_pointer();
symbol_ptr();
kernel_ptr();
@@ -605,11 +797,14 @@ test_pointer(void)
uuid();
dentry();
struct_va_format();
- struct_rtc_time();
+ time_and_date();
struct_clk();
bitmap();
netdev_features();
flags();
+ errptr();
+ fwnode_pointer();
+ fourcc_pointer();
}
static void __init selftest(void)
diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
new file mode 100644
index 000000000000..49970a7c96f3
--- /dev/null
+++ b/lib/test_ref_tracker.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reference tracker self test.
+ *
+ * Copyright (c) 2021 Eric Dumazet <edumazet@google.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+static struct ref_tracker_dir ref_dir;
+static struct ref_tracker *tracker[20];
+
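+/*
+ * Generate one noinline allocation wrapper per index so every tracked
+ * reference is taken from a distinct call site.
+ */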
+#define TRT_ALLOC(X) static noinline void \
+ alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \
+ struct ref_tracker **trackerp) \
+ { \
+ ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \
+ }
+
+TRT_ALLOC(1)
+TRT_ALLOC(2)
+TRT_ALLOC(3)
+TRT_ALLOC(4)
+TRT_ALLOC(5)
+TRT_ALLOC(6)
+TRT_ALLOC(7)
+TRT_ALLOC(8)
+TRT_ALLOC(9)
+TRT_ALLOC(10)
+TRT_ALLOC(11)
+TRT_ALLOC(12)
+TRT_ALLOC(13)
+TRT_ALLOC(14)
+TRT_ALLOC(15)
+TRT_ALLOC(16)
+TRT_ALLOC(17)
+TRT_ALLOC(18)
+TRT_ALLOC(19)
+
+#undef TRT_ALLOC
+
+static noinline void
+alloctest_ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ ref_tracker_free(dir, trackerp);
+}
+
+
+static struct timer_list test_ref_tracker_timer;
+static atomic_t test_ref_timer_done = ATOMIC_INIT(0);
+
+static void test_ref_tracker_timer_func(struct timer_list *t)
+{
+ ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC);
+ atomic_set(&test_ref_timer_done, 1);
+}
+
+static int __init test_ref_tracker_init(void)
+{
+ int i;
+
+ ref_tracker_dir_init(&ref_dir, 100, "selftest");
+
+ timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
+ mod_timer(&test_ref_tracker_timer, jiffies + 1);
+
+ alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]);
+ alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]);
+ alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]);
+ alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]);
+ alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]);
+ alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]);
+ alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]);
+ alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]);
+ alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]);
+ alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]);
+ alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]);
+ alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]);
+ alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]);
+ alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]);
+ alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]);
+ alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]);
+ alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]);
+ alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]);
+ alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]);
+
+	/* Free all trackers except tracker[0] and tracker[1]. */
+ for (i = 2; i < ARRAY_SIZE(tracker); i++)
+ alloctest_ref_tracker_free(&ref_dir, &tracker[i]);
+
+ /* Attempt to free an already freed tracker. */
+ alloctest_ref_tracker_free(&ref_dir, &tracker[2]);
+
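+	/* Wait for the timer callback to take its reference (tracker[0]). */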
+ while (!atomic_read(&test_ref_timer_done))
+ msleep(1);
+
+	/* This should warn about tracker[0] & tracker[1] not being freed. */
+ ref_tracker_dir_exit(&ref_dir);
+
+ return 0;
+}
+
+static void __exit test_ref_tracker_exit(void)
+{
+}
+
+module_init(test_ref_tracker_init);
+module_exit(test_ref_tracker_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c5a6fef7b45d..42b585208249 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,6 +16,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
+#include <linux/rcupdate_wait.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -291,7 +292,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN_ON(err))
goto out_free;
- k = prandom_u32();
+ k = get_random_u32();
ret = 0;
for (i = 0; i < entries; i++) {
rhl_test_objects[i].value.id = k;
@@ -368,19 +369,11 @@ static int __init test_rhltable(unsigned int entries)
pr_info("test %d random rhlist add/delete operations\n", entries);
for (j = 0; j < entries; j++) {
- u32 i = prandom_u32_max(entries);
- u32 prand = prandom_u32();
+ u32 i = get_random_u32_below(entries);
+ u32 prand = get_random_u32_below(4);
cond_resched();
- if (prand == 0)
- prand = prandom_u32();
-
- if (prand & 1) {
- prand >>= 1;
- continue;
- }
-
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
if (test_bit(i, obj_in_table)) {
clear_bit(i, obj_in_table);
@@ -393,35 +386,29 @@ static int __init test_rhltable(unsigned int entries)
}
if (prand & 1) {
- prand >>= 1;
- continue;
- }
-
- err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
- if (err == 0) {
- if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
- continue;
- } else {
- if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
- continue;
- }
-
- if (prand & 1) {
- prand >>= 1;
- continue;
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ if (err == 0) {
+ if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
+ continue;
+ } else {
+ if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
+ continue;
+ }
}
- i = prandom_u32_max(entries);
- if (test_bit(i, obj_in_table)) {
- err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
- WARN(err, "cannot remove element at slot %d", i);
- if (err == 0)
- clear_bit(i, obj_in_table);
- } else {
- err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
- WARN(err, "failed to insert object %d", i);
- if (err == 0)
- set_bit(i, obj_in_table);
+ if (prand & 2) {
+ i = get_random_u32_below(entries);
+ if (test_bit(i, obj_in_table)) {
+ err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ WARN(err, "cannot remove element at slot %d", i);
+ if (err == 0)
+ clear_bit(i, obj_in_table);
+ } else {
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ WARN(err, "failed to insert object %d", i);
+ if (err == 0)
+ set_bit(i, obj_in_table);
+ }
}
}
@@ -434,7 +421,7 @@ static int __init test_rhltable(unsigned int entries)
} else {
if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
err, -ENOENT))
- continue;
+ continue;
}
}
@@ -448,7 +435,7 @@ out_free:
static int __init test_rhashtable_max(struct test_obj *array,
unsigned int entries)
{
- unsigned int i, insert_retries = 0;
+ unsigned int i;
int err;
test_rht_params.max_size = roundup_pow_of_two(entries / 8);
@@ -461,9 +448,7 @@ static int __init test_rhashtable_max(struct test_obj *array,
obj->value.id = i * 2;
err = insert_retry(&ht, obj, test_rht_params);
- if (err > 0)
- insert_retries += err;
- else if (err)
+ if (err < 0)
return err;
}
@@ -487,6 +472,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
struct rhashtable *ht;
const struct bucket_table *tbl;
char buff[512] = "";
+ int offset = 0;
unsigned int i, cnt = 0;
ht = &rhlt->ht;
@@ -501,18 +487,18 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
if (!rht_is_a_nulls(pos)) {
- sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+ offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
}
while (!rht_is_a_nulls(pos)) {
struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
- sprintf(buff, "%s[[", buff);
+ offset += sprintf(buff + offset, "[[");
do {
pos = &list->rhead;
list = rht_dereference(list->next, ht);
p = rht_obj(ht, pos);
- sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+ offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
list? ", " : " ");
cnt++;
} while (list);
@@ -521,7 +507,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
- sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+ offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
new file mode 100644
index 000000000000..a2707af2951a
--- /dev/null
+++ b/lib/test_scanf.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for sscanf facility.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define BUF_SIZE 1024
+
+KSTM_MODULE_GLOBALS();
+static char *test_buffer __initdata;
+static char *fmt_buffer __initdata;
+static struct rnd_state rnd_state __initdata;
+
+typedef int (*check_fn)(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap);
+
+static void __scanf(4, 6) __init
+_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
+ int n_args, ...)
+{
+ va_list ap, ap_copy;
+ int ret;
+
+ total_tests++;
+
+ va_start(ap, n_args);
+ va_copy(ap_copy, ap);
+ ret = vsscanf(string, fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (ret != n_args) {
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
+ string, fmt, ret, n_args);
+ goto fail;
+ }
+
+ ret = (*fn)(check_data, string, fmt, n_args, ap);
+ if (ret)
+ goto fail;
+
+ va_end(ap);
+
+ return;
+
+fail:
+ failed_tests++;
+ va_end(ap);
+}
+
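+/*
+ * Walk the scanned va_list and compare each converted value against the
+ * corresponding entry in the expected-values array; returns 0 from the
+ * enclosing check function on success, 1 on the first mismatch.
+ */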
+#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
+do { \
+ pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
+ for (; n_args > 0; n_args--, expect++) { \
+ typeof(*expect) got = *va_arg(ap, typeof(expect)); \
+ pr_debug("\t" arg_fmt "\n", got); \
+ if (got != *expect) { \
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
+ str, fmt, *expect, got); \
+ return 1; \
+ } \
+ } \
+ return 0; \
+} while (0)
+
+static int __init check_ull(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long long *pval = check_data;
+
+ _check_numbers_template("%llu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ll(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long long *pval = check_data;
+
+ _check_numbers_template("%lld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ulong(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long *pval = check_data;
+
+ _check_numbers_template("%lu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_long(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long *pval = check_data;
+
+ _check_numbers_template("%ld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uint(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned int *pval = check_data;
+
+ _check_numbers_template("%u", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_int(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const int *pval = check_data;
+
+ _check_numbers_template("%d", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ushort(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned short *pval = check_data;
+
+ _check_numbers_template("%hu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_short(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const short *pval = check_data;
+
+ _check_numbers_template("%hd", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uchar(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned char *pval = check_data;
+
+ _check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_char(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const signed char *pval = check_data;
+
+ _check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
+}
+
+/* Selection of interesting numbers to test, copied from test-kstrtox.c */
+static const unsigned long long numbers[] __initconst = {
+ 0x0ULL,
+ 0x1ULL,
+ 0x7fULL,
+ 0x80ULL,
+ 0x81ULL,
+ 0xffULL,
+ 0x100ULL,
+ 0x101ULL,
+ 0x7fffULL,
+ 0x8000ULL,
+ 0x8001ULL,
+ 0xffffULL,
+ 0x10000ULL,
+ 0x10001ULL,
+ 0x7fffffffULL,
+ 0x80000000ULL,
+ 0x80000001ULL,
+ 0xffffffffULL,
+ 0x100000000ULL,
+ 0x100000001ULL,
+ 0x7fffffffffffffffULL,
+ 0x8000000000000000ULL,
+ 0x8000000000000001ULL,
+ 0xfffffffffffffffeULL,
+ 0xffffffffffffffffULL,
+};
+
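+/* True if 'val' can be represented in type T, honouring T's signedness. */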
+#define value_representable_in_type(T, val) \
+(is_signed_type(T) \
+ ? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
+ : ((unsigned long long)(val) <= type_max(T)))
+
+
+#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
+do { \
+ const T expect_val = (T)(val); \
+ T result = ~expect_val; /* should be overwritten */ \
+ \
+ snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
+ _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+} while (0)
+
+#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ if (value_representable_in_type(T, numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ numbers[i], fn); \
+ \
+ if (value_representable_in_type(T, -numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ -numbers[i], fn); \
+ } \
+} while (0)
+
+static void __init numbers_simple(void)
+{
+ simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
+ simple_numbers_loop(long long, "%lld", "lld", check_ll);
+ simple_numbers_loop(long long, "%lld", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "%llx", "llx", check_ll);
+ simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
+
+ simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
+ simple_numbers_loop(long, "%ld", "ld", check_long);
+ simple_numbers_loop(long, "%ld", "li", check_long);
+ simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "%lx", "lx", check_long);
+ simple_numbers_loop(long, "0x%lx", "li", check_long);
+ simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "0x%lx", "lx", check_long);
+
+ simple_numbers_loop(unsigned int, "%u", "u", check_uint);
+ simple_numbers_loop(int, "%d", "d", check_int);
+ simple_numbers_loop(int, "%d", "i", check_int);
+ simple_numbers_loop(unsigned int, "%x", "x", check_uint);
+ simple_numbers_loop(int, "%x", "x", check_int);
+ simple_numbers_loop(int, "0x%x", "i", check_int);
+ simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
+ simple_numbers_loop(int, "0x%x", "x", check_int);
+
+ simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
+ simple_numbers_loop(short, "%hd", "hd", check_short);
+ simple_numbers_loop(short, "%hd", "hi", check_short);
+ simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "%hx", "hx", check_short);
+ simple_numbers_loop(short, "0x%hx", "hi", check_short);
+ simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "0x%hx", "hx", check_short);
+
+ simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
+ simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
+ simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
+ simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
+}
+
+/*
+ * This gives a better variety of number "lengths" in a small sample than
+ * the raw prandom*() functions (Not mathematically rigorous!!).
+ * Variability of length and value is more important than perfect randomness.
+ */
+static u32 __init next_test_random(u32 max_bits)
+{
+ u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
+
+ return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
+}
+
+static unsigned long long __init next_test_random_ull(void)
+{
+ u32 rand1 = prandom_u32_state(&rnd_state);
+ u32 n_bits = (hweight32(rand1) * 3) % 64;
+ u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
+
+ return val & GENMASK_ULL(n_bits, 0);
+}
+
+#define random_for_type(T) \
+ ((T)(sizeof(T) <= sizeof(u32) \
+ ? next_test_random(BITS_PER_TYPE(T)) \
+ : next_test_random_ull()))
+
+/*
+ * Define a pattern of negative and positive numbers to ensure we get
+ * some of both within the small number of samples in a test string.
+ */
+#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
+
+#define fill_random_array(arr) \
+do { \
+ unsigned int neg_pattern = NEGATIVES_PATTERN; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
+ (arr)[i] = random_for_type(typeof((arr)[0])); \
+ if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
+ (arr)[i] = -(arr)[i]; \
+ } \
+} while (0)
+
+/*
+ * Convenience wrapper around snprintf() to append at buf_pos in buf,
+ * updating buf_pos and returning the number of characters appended.
+ * On error buf_pos is not changed and return value is 0.
+ */
+static int __init __printf(4, 5)
+append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
+{
+ va_list ap;
+ int field_len;
+
+ va_start(ap, val_fmt);
+ field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
+ va_end(ap);
+
+ if (field_len < 0)
+ field_len = 0;
+
+ *buf_pos += field_len;
+
+ return field_len;
+}
+
+/*
+ * Convenience function to append the field delimiter string
+ * to both the value string and format string buffers.
+ */
+static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+ char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
+ const char *delim_str)
+{
+ append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
+ append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
+}
+
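+/* Scan all eight elements of 'arr' from 'string' in a single vsscanf() call. */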
+#define test_array_8(fn, check_data, string, fmt, arr) \
+do { \
+ BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
+ _test(fn, check_data, string, fmt, 8, \
+ &(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
+ &(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
+} while (0)
+
+#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, \
+ field_sep); \
+ \
+ append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
+do { \
+ char full_fmt[16]; \
+ \
+ snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
+ numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
+} while (0)
+
+#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, val_len, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
+ \
+ val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
+ expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
+ "%%%u%s", val_len, scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+static void __init numbers_list_ll(const char *delim)
+{
+ numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_8(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_8(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void __init numbers_list_l(const char *delim)
+{
+ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_8(long, "%ld", delim, "ld", check_long);
+ numbers_list_8(long, "%ld", delim, "li", check_long);
+ numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_8(long, "0x%lx", delim, "li", check_long);
+}
+
+static void __init numbers_list_d(const char *delim)
+{
+ numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_8(int, "%d", delim, "d", check_int);
+ numbers_list_8(int, "%d", delim, "i", check_int);
+ numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_8(int, "0x%x", delim, "i", check_int);
+}
+
+static void __init numbers_list_h(const char *delim)
+{
+ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_8(short, "%hd", delim, "hd", check_short);
+ numbers_list_8(short, "%hd", delim, "hi", check_short);
+ numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void __init numbers_list_hh(const char *delim)
+{
+ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+static void __init numbers_list(const char *delim)
+{
+ numbers_list_ll(delim);
+ numbers_list_l(delim);
+ numbers_list_d(delim);
+ numbers_list_h(delim);
+ numbers_list_hh(delim);
+}
+
+static void __init numbers_list_field_width_ll(const char *delim)
+{
+ numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
+ numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
+ numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
+ numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+}
+
+static void __init numbers_list_field_width_l(const char *delim)
+{
+#if BITS_PER_LONG == 64
+ numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
+#else
+ numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
+#endif
+}
+
+static void __init numbers_list_field_width_d(const char *delim)
+{
+ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
+ numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
+ numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
+ numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
+ numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
+ numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+}
+
+static void __init numbers_list_field_width_h(const char *delim)
+{
+ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
+ numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
+ numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
+ numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+}
+
+static void __init numbers_list_field_width_hh(const char *delim)
+{
+ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
+ numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
+ numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
+ numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * maximum possible number of digits for the given type and base.
+ */
+static void __init numbers_list_field_width_typemax(const char *delim)
+{
+ numbers_list_field_width_ll(delim);
+ numbers_list_field_width_l(delim);
+ numbers_list_field_width_d(delim);
+ numbers_list_field_width_h(delim);
+ numbers_list_field_width_hh(delim);
+}
+
+static void __init numbers_list_field_width_val_ll(const char *delim)
+{
+ numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void __init numbers_list_field_width_val_l(const char *delim)
+{
+ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_val_width(long, "%ld", delim, "ld", check_long);
+ numbers_list_val_width(long, "%ld", delim, "li", check_long);
+ numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+}
+
+static void __init numbers_list_field_width_val_d(const char *delim)
+{
+ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_val_width(int, "%d", delim, "d", check_int);
+ numbers_list_val_width(int, "%d", delim, "i", check_int);
+ numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+}
+
+static void __init numbers_list_field_width_val_h(const char *delim)
+{
+ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_val_width(short, "%hd", delim, "hd", check_short);
+ numbers_list_val_width(short, "%hd", delim, "hi", check_short);
+ numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void __init numbers_list_field_width_val_hh(const char *delim)
+{
+ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * exact length of the corresponding value digits in the string being scanned.
+ */
+static void __init numbers_list_field_width_val_width(const char *delim)
+{
+ numbers_list_field_width_val_ll(delim);
+ numbers_list_field_width_val_l(delim);
+ numbers_list_field_width_val_d(delim);
+ numbers_list_field_width_val_h(delim);
+ numbers_list_field_width_val_hh(delim);
+}
+
+/*
+ * Slice a continuous string of digits without field delimiters, containing
+ * numbers of varying length, using the field width to extract each group
+ * of digits. For example, the hex values c0,3,bf01,303 would have a
+ * string representation of "c03bf01303" and be extracted with "%2x%1x%4x%3x".
+ */
+static void __init numbers_slice(void)
+{
+ numbers_list_field_width_val_width("");
+}
+
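+/*
+ * Scan 'str' with a two-conversion format, checking both the converted
+ * values and the number of fields successfully converted (n_args).
+ */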
+#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
+do { \
+ const T expect[2] = { expect0, expect1 }; \
+ T result[2] = { (T)~expect[0], (T)~expect[1] }; \
+ \
+ _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+} while (0)
+
+/*
+ * Number prefix is >= field width.
+ * Expected behaviour is derived from testing userland sscanf.
+ */
+static void __init numbers_prefix_overflow(void)
+{
+ /*
+	 * A negative decimal scanned with a field width of 1 should quit
+	 * scanning and return 0.
+ */
+ test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
+
+ test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
+
+ /*
+ * 0x prefix in a field of width 1: 0 is a valid digit so should
+ * convert. Next field scan starts at the 'x' which isn't a digit so
+ * scan quits with one field converted.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
+ test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
+
+ /*
+ * 0x prefix in a field of width 2 using %x conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x".
+ * Both fields will convert.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
+
+ /*
+ * 0x prefix in a field of width 2 using %i conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x",
+	 * which will convert if it can be interpreted as decimal but will fail
+ * if it contains any hex digits (since no 0x prefix).
+ */
+ test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
+ test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
+ test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
+ test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
+ test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
+
+ test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
+}
+
+#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
+do { \
+ T got; \
+ char *endp; \
+ int len; \
+ bool fail = false; \
+ \
+ total_tests++; \
+ len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
+ got = (fn)(test_buffer, &endp, base); \
+ pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
+ if (got != (expect)) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
+ test_buffer, base, got, expect); \
+ } else if (endp != test_buffer + len) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
+ } \
+ \
+ if (fail) \
+ failed_tests++; \
+} while (0)
+
+#define test_simple_strtoxx(T, fn, gen_fmt, base) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ _test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
+ \
+ if (is_signed_type(T)) \
+ _test_simple_strtoxx(T, fn, gen_fmt, \
+ -(T)numbers[i], base); \
+ } \
+} while (0)
+
+static void __init test_simple_strtoull(void)
+{
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoll(void)
+{
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
+ test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoul(void)
+{
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
+}
+
+static void __init test_simple_strtol(void)
+{
+ test_simple_strtoxx(long, simple_strtol, "%ld", 10);
+ test_simple_strtoxx(long, simple_strtol, "%ld", 0);
+ test_simple_strtoxx(long, simple_strtol, "%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
+}
+
+/* Selection of common delimiters/separators between numbers in a string. */
+static const char * const number_delimiters[] __initconst = {
+ " ", ":", ",", "-", "/",
+};
+
+static void __init test_numbers(void)
+{
+ int i;
+
+ /* String containing only one number. */
+ numbers_simple();
+
+ /* String with multiple numbers separated by delimiter. */
+ for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
+ numbers_list(number_delimiters[i]);
+
+ /* Field width may be longer than actual field digits. */
+ numbers_list_field_width_typemax(number_delimiters[i]);
+
+ /* Each field width exactly length of actual field digits. */
+ numbers_list_field_width_val_width(number_delimiters[i]);
+ }
+
+ /* Slice continuous sequence of digits using field widths. */
+ numbers_slice();
+
+ numbers_prefix_overflow();
+}
+
+static void __init selftest(void)
+{
+ test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!test_buffer)
+ return;
+
+ fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!fmt_buffer) {
+ kfree(test_buffer);
+ return;
+ }
+
+ prandom_seed_state(&rnd_state, 3141592653589793238ULL);
+
+ test_numbers();
+
+ test_simple_strtoull();
+ test_simple_strtoll();
+ test_simple_strtoul();
+ test_simple_strtol();
+
+ kfree(fmt_buffer);
+ kfree(test_buffer);
+}
+
+KSTM_MODULE_LOADERS(test_scanf);
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_sort.c b/lib/test_sort.c
index 52edbe10f2e5..be02e3a098cf 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -7,18 +10,17 @@
#define TEST_LEN 1000
-static int __init cmpint(const void *a, const void *b)
+static int cmpint(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
-static int __init test_sort_init(void)
+static void test_sort(struct kunit *test)
{
- int *a, i, r = 1, err = -ENOMEM;
+ int *a, i, r = 1;
- a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
- if (!a)
- return err;
+ a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a);
for (i = 0; i < TEST_LEN; i++) {
r = (r * 725861) % 6599;
@@ -27,24 +29,20 @@ static int __init test_sort_init(void)
sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
- err = -EINVAL;
for (i = 0; i < TEST_LEN-1; i++)
- if (a[i] > a[i+1]) {
- pr_err("test has failed\n");
- goto exit;
- }
- err = 0;
- pr_info("test passed\n");
-exit:
- kfree(a);
- return err;
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
}
-static void __exit test_sort_exit(void)
-{
-}
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE(test_sort),
+ {}
+};
+
+static struct kunit_suite sort_test_suite = {
+ .name = "lib_sort",
+ .test_cases = sort_test_cases,
+};
-module_init(test_sort_init);
-module_exit(test_sort_exit);
+kunit_test_suites(&sort_test_suite);
MODULE_LICENSE("GPL");
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
deleted file mode 100644
index 2d7d257a430e..000000000000
--- a/lib/test_stackinit.c
+++ /dev/null
@@ -1,387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test cases for compiler-based stack variable zeroing via future
- * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-/* Exfiltration buffer. */
-#define MAX_VAR_SIZE 128
-static u8 check_buf[MAX_VAR_SIZE];
-
-/* Character array to trigger stack protector in all functions. */
-#define VAR_BUFFER 32
-
-/* Volatile mask to convince compiler to copy memory with 0xff. */
-static volatile u8 forced_mask = 0xff;
-
-/* Location and size tracking to validate fill and test are colocated. */
-static void *fill_start, *target_start;
-static size_t fill_size, target_size;
-
-static bool range_contains(char *haystack_start, size_t haystack_size,
- char *needle_start, size_t needle_size)
-{
- if (needle_start >= haystack_start &&
- needle_start + needle_size <= haystack_start + haystack_size)
- return true;
- return false;
-}
-
-#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
-#define DO_NOTHING_TYPE_STRING(var_type) void
-#define DO_NOTHING_TYPE_STRUCT(var_type) void
-
-#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
-#define DO_NOTHING_RETURN_STRING(ptr) /**/
-#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
-
-#define DO_NOTHING_CALL_SCALAR(var, name) \
- (var) = do_nothing_ ## name(&(var))
-#define DO_NOTHING_CALL_STRING(var, name) \
- do_nothing_ ## name(var)
-#define DO_NOTHING_CALL_STRUCT(var, name) \
- do_nothing_ ## name(&(var))
-
-#define FETCH_ARG_SCALAR(var) &var
-#define FETCH_ARG_STRING(var) var
-#define FETCH_ARG_STRUCT(var) &var
-
-#define FILL_SIZE_STRING 16
-
-#define INIT_CLONE_SCALAR /**/
-#define INIT_CLONE_STRING [FILL_SIZE_STRING]
-#define INIT_CLONE_STRUCT /**/
-
-#define INIT_SCALAR_none /**/
-#define INIT_SCALAR_zero = 0
-
-#define INIT_STRING_none [FILL_SIZE_STRING] /**/
-#define INIT_STRING_zero [FILL_SIZE_STRING] = { }
-
-#define INIT_STRUCT_none /**/
-#define INIT_STRUCT_zero = { }
-#define INIT_STRUCT_static_partial = { .two = 0, }
-#define INIT_STRUCT_static_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
- }
-#define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
-#define INIT_STRUCT_dynamic_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
- }
-#define INIT_STRUCT_runtime_partial ; \
- var.two = 0
-#define INIT_STRUCT_runtime_all ; \
- var.one = 0; \
- var.two = 0; \
- var.three = 0; \
- memset(&var.four, 0, \
- sizeof(var.four))
-
-/*
- * @name: unique string name for the test
- * @var_type: type to be tested for zeroing initialization
- * @which: is this a SCALAR, STRING, or STRUCT type?
- * @init_level: what kind of initialization is performed
- */
-#define DEFINE_TEST_DRIVER(name, var_type, which) \
-/* Returns 0 on success, 1 on failure. */ \
-static noinline __init int test_ ## name (void) \
-{ \
- var_type zero INIT_CLONE_ ## which; \
- int ignored; \
- u8 sum = 0, i; \
- \
- /* Notice when a new test is larger than expected. */ \
- BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
- \
- /* Fill clone type with zero for per-field init. */ \
- memset(&zero, 0x00, sizeof(zero)); \
- /* Clear entire check buffer for 0xFF overlap test. */ \
- memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Fill stack with 0xFF. */ \
- ignored = leaf_ ##name((unsigned long)&ignored, 1, \
- FETCH_ARG_ ## which(zero)); \
- /* Verify all bytes overwritten with 0xFF. */ \
- for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] != 0xFF); \
- if (sum) { \
- pr_err(#name ": leaf fill was not 0xFF!?\n"); \
- return 1; \
- } \
- /* Clear entire check buffer for later bit tests. */ \
- memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Extract stack-defined variable contents. */ \
- ignored = leaf_ ##name((unsigned long)&ignored, 0, \
- FETCH_ARG_ ## which(zero)); \
- \
- /* Validate that compiler lined up fill and target. */ \
- if (!range_contains(fill_start, fill_size, \
- target_start, target_size)) { \
- pr_err(#name ": stack fill missed target!?\n"); \
- pr_err(#name ": fill %zu wide\n", fill_size); \
- pr_err(#name ": target offset by %d\n", \
- (int)((ssize_t)(uintptr_t)fill_start - \
- (ssize_t)(uintptr_t)target_start)); \
- return 1; \
- } \
- \
- /* Look for any bytes still 0xFF in check region. */ \
- for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] == 0xFF); \
- \
- if (sum == 0) \
- pr_info(#name " ok\n"); \
- else \
- pr_warn(#name " FAIL (uninit bytes: %d)\n", \
- sum); \
- \
- return (sum != 0); \
-}
-#define DEFINE_TEST(name, var_type, which, init_level) \
-/* no-op to force compiler into ignoring "uninitialized" vars */\
-static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
-do_nothing_ ## name(var_type *ptr) \
-{ \
- /* Will always be true, but compiler doesn't know. */ \
- if ((unsigned long)ptr > 0x2) \
- return DO_NOTHING_RETURN_ ## which(ptr); \
- else \
- return DO_NOTHING_RETURN_ ## which(ptr + 1); \
-} \
-static noinline __init int leaf_ ## name(unsigned long sp, \
- bool fill, \
- var_type *arg) \
-{ \
- char buf[VAR_BUFFER]; \
- var_type var INIT_ ## which ## _ ## init_level; \
- \
- target_start = &var; \
- target_size = sizeof(var); \
- /* \
- * Keep this buffer around to make sure we've got a \
- * stack frame of SOME kind... \
- */ \
- memset(buf, (char)(sp & 0xff), sizeof(buf)); \
- /* Fill variable with 0xFF. */ \
- if (fill) { \
- fill_start = &var; \
- fill_size = sizeof(var); \
- memset(fill_start, \
- (char)((sp & 0xff) | forced_mask), \
- fill_size); \
- } \
- \
- /* Silence "never initialized" warnings. */ \
- DO_NOTHING_CALL_ ## which(var, name); \
- \
- /* Exfiltrate "var". */ \
- memcpy(check_buf, target_start, target_size); \
- \
- return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
-} \
-DEFINE_TEST_DRIVER(name, var_type, which)
-
-/* Structure with no padding. */
-struct test_packed {
- unsigned long one;
- unsigned long two;
- unsigned long three;
- unsigned long four;
-};
-
-/* Simple structure with padding likely to be covered by compiler. */
-struct test_small_hole {
- size_t one;
- char two;
- /* 3 byte padding hole here. */
- int three;
- unsigned long four;
-};
-
-/* Try to trigger unhandled padding in a structure. */
-struct test_aligned {
- u32 internal1;
- u64 internal2;
-} __aligned(64);
-
-struct test_big_hole {
- u8 one;
- u8 two;
- u8 three;
- /* 61 byte padding hole here. */
- struct test_aligned four;
-} __aligned(64);
-
-struct test_trailing_hole {
- char *one;
- char *two;
- char *three;
- char four;
- /* "sizeof(unsigned long) - 1" byte padding hole here. */
-};
-
-/* Test if STRUCTLEAK is clearing structs with __user fields. */
-struct test_user {
- u8 one;
- unsigned long two;
- char __user *three;
- unsigned long four;
-};
-
-#define DEFINE_SCALAR_TEST(name, init) \
- DEFINE_TEST(name ## _ ## init, name, SCALAR, init)
-
-#define DEFINE_SCALAR_TESTS(init) \
- DEFINE_SCALAR_TEST(u8, init); \
- DEFINE_SCALAR_TEST(u16, init); \
- DEFINE_SCALAR_TEST(u32, init); \
- DEFINE_SCALAR_TEST(u64, init); \
- DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init)
-
-#define DEFINE_STRUCT_TEST(name, init) \
- DEFINE_TEST(name ## _ ## init, \
- struct test_ ## name, STRUCT, init)
-
-#define DEFINE_STRUCT_TESTS(init) \
- DEFINE_STRUCT_TEST(small_hole, init); \
- DEFINE_STRUCT_TEST(big_hole, init); \
- DEFINE_STRUCT_TEST(trailing_hole, init); \
- DEFINE_STRUCT_TEST(packed, init)
-
-/* These should be fully initialized all the time! */
-DEFINE_SCALAR_TESTS(zero);
-DEFINE_STRUCT_TESTS(zero);
-/* Static initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(static_partial);
-DEFINE_STRUCT_TESTS(static_all);
-/* Dynamic initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(dynamic_partial);
-DEFINE_STRUCT_TESTS(dynamic_all);
-/* Runtime initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(runtime_partial);
-DEFINE_STRUCT_TESTS(runtime_all);
-/* No initialization without compiler instrumentation. */
-DEFINE_SCALAR_TESTS(none);
-DEFINE_STRUCT_TESTS(none);
-DEFINE_TEST(user, struct test_user, STRUCT, none);
-
-/*
- * Check two uses through a variable declaration outside either path,
- * which was noticed as a special case in porting earlier stack init
- * compiler logic.
- */
-static int noinline __leaf_switch_none(int path, bool fill)
-{
- switch (path) {
- uint64_t var;
-
- case 1:
- target_start = &var;
- target_size = sizeof(var);
- if (fill) {
- fill_start = &var;
- fill_size = sizeof(var);
-
- memset(fill_start, forced_mask | 0x55, fill_size);
- }
- memcpy(check_buf, target_start, target_size);
- break;
- case 2:
- target_start = &var;
- target_size = sizeof(var);
- if (fill) {
- fill_start = &var;
- fill_size = sizeof(var);
-
- memset(fill_start, forced_mask | 0xaa, fill_size);
- }
- memcpy(check_buf, target_start, target_size);
- break;
- default:
- var = 5;
- return var & forced_mask;
- }
- return 0;
-}
-
-static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill,
- uint64_t *arg)
-{
- return __leaf_switch_none(1, fill);
-}
-
-static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
- uint64_t *arg)
-{
- return __leaf_switch_none(2, fill);
-}
-
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
-
-static int __init test_stackinit_init(void)
-{
- unsigned int failures = 0;
-
-#define test_scalars(init) do { \
- failures += test_u8_ ## init (); \
- failures += test_u16_ ## init (); \
- failures += test_u32_ ## init (); \
- failures += test_u64_ ## init (); \
- failures += test_char_array_ ## init (); \
- } while (0)
-
-#define test_structs(init) do { \
- failures += test_small_hole_ ## init (); \
- failures += test_big_hole_ ## init (); \
- failures += test_trailing_hole_ ## init (); \
- failures += test_packed_ ## init (); \
- } while (0)
-
- /* These are explicitly initialized and should always pass. */
- test_scalars(zero);
- test_structs(zero);
- /* Padding here appears to be accidentally always initialized? */
- test_structs(dynamic_partial);
- /* Padding initialization depends on compiler behaviors. */
- test_structs(static_partial);
- test_structs(static_all);
- test_structs(dynamic_all);
- test_structs(runtime_partial);
- test_structs(runtime_all);
-
- /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
- test_scalars(none);
- failures += test_switch_1_none();
- failures += test_switch_2_none();
-
- /* STRUCTLEAK_BYREF should cover from here down. */
- test_structs(none);
-
- /* STRUCTLEAK will only cover this. */
- failures += test_user();
-
- if (failures == 0)
- pr_info("all tests passed!\n");
- else
- pr_err("failures: %u\n", failures);
-
- return failures ? -EINVAL : 0;
-}
-module_init(test_stackinit_init);
-
-static void __exit test_stackinit_exit(void)
-{ }
-module_exit(test_stackinit_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/lib/test_string.c b/lib/test_string.c
index b5117ae59693..c5cb92fb710e 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -112,6 +112,105 @@ fail:
return 0;
}
+static __init int strchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ if (result - test_string != i)
+ return i + 'a';
+ }
+
+ result = strchr(empty_string, '\0');
+ if (result != empty_string)
+ return 0x101;
+
+ result = strchr(empty_string, 'a');
+ if (result)
+ return 0x102;
+
+ result = strchr(test_string, 'z');
+ if (result)
+ return 0x103;
+
+ return 0;
+}
+
+static __init int strnchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ if (!result)
+ continue;
+ return ((i + 'a') << 8) | j;
+ }
+ if (result - test_string != i)
+ return ((i + 'a') << 8) | j;
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ if (result)
+ return 0x10001;
+
+ result = strnchr(empty_string, 1, '\0');
+ if (result != empty_string)
+ return 0x10002;
+
+ result = strnchr(empty_string, 1, 'a');
+ if (result)
+ return 0x10003;
+
+ result = strnchr(NULL, 0, '\0');
+ if (result)
+ return 0x10004;
+
+ return 0;
+}
+
+static __init int strspn_selftest(void)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] __initconst = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i, res;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ res = strspn(s->str, s->accept);
+ if (res != s->a)
+ return 0x100 + 2*i;
+ res = strcspn(s->str, s->reject);
+ if (res != s->r)
+ return 0x100 + 2*i + 1;
+ }
+ return 0;
+}
+
+static __exit void string_selftest_remove(void)
+{
+}
+
static __init int string_selftest_init(void)
{
int test, subtest;
@@ -131,6 +230,21 @@ static __init int string_selftest_init(void)
if (subtest)
goto fail;
+ test = 4;
+ subtest = strchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 5;
+ subtest = strnchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 6;
+ subtest = strspn_selftest();
+ if (subtest)
+ goto fail;
+
pr_info("String selftests succeeded\n");
return 0;
fail:
@@ -139,4 +253,5 @@ fail:
}
module_init(string_selftest_init);
+module_exit(string_selftest_remove);
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_strscpy.c b/lib/test_strscpy.c
deleted file mode 100644
index a827f94601f5..000000000000
--- a/lib/test_strscpy.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/string.h>
-
-#include "../tools/testing/selftests/kselftest_module.h"
-
-/*
- * Kernel module for testing 'strscpy' family of functions.
- */
-
-KSTM_MODULE_GLOBALS();
-
-/*
- * tc() - Run a specific test case.
- * @src: Source string, argument to strscpy_pad()
- * @count: Size of destination buffer, argument to strscpy_pad()
- * @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
- * @chars: Number of characters from the src string expected to be
- * written to the dst buffer.
- * @pad: Number of pad characters expected (in the tail of dst buffer).
- * (@pad does not include the null terminator byte.)
- *
- * Calls strscpy_pad() and verifies the return value and state of the
- * destination buffer after the call returns.
- */
-static int __init tc(char *src, int count, int expected,
- int chars, int terminator, int pad)
-{
- int nr_bytes_poison;
- int max_expected;
- int max_count;
- int written;
- char buf[6];
- int index, i;
- const char POISON = 'z';
-
- total_tests++;
-
- if (!src) {
- pr_err("null source string not supported\n");
- return -1;
- }
-
- memset(buf, POISON, sizeof(buf));
- /* Future proofing test suite, validate args */
- max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
- max_expected = count - 1; /* Space for the null */
- if (count > max_count) {
- pr_err("count (%d) is too big (%d) ... aborting", count, max_count);
- return -1;
- }
- if (expected > max_expected) {
- pr_warn("expected (%d) is bigger than can possibly be returned (%d)",
- expected, max_expected);
- }
-
- written = strscpy_pad(buf, src, count);
- if ((written) != (expected)) {
- pr_err("%d != %d (written, expected)\n", written, expected);
- goto fail;
- }
-
- if (count && written == -E2BIG) {
- if (strncmp(buf, src, count - 1) != 0) {
- pr_err("buffer state invalid for -E2BIG\n");
- goto fail;
- }
- if (buf[count - 1] != '\0') {
- pr_err("too big string is not null terminated correctly\n");
- goto fail;
- }
- }
-
- for (i = 0; i < chars; i++) {
- if (buf[i] != src[i]) {
- pr_err("buf[i]==%c != src[i]==%c\n", buf[i], src[i]);
- goto fail;
- }
- }
-
- if (terminator) {
- if (buf[count - 1] != '\0') {
- pr_err("string is not null terminated correctly\n");
- goto fail;
- }
- }
-
- for (i = 0; i < pad; i++) {
- index = chars + terminator + i;
- if (buf[index] != '\0') {
- pr_err("padding missing at index: %d\n", i);
- goto fail;
- }
- }
-
- nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
- for (i = 0; i < nr_bytes_poison; i++) {
- index = sizeof(buf) - 1 - i; /* Check from the end back */
- if (buf[index] != POISON) {
- pr_err("poison value missing at index: %d\n", i);
- goto fail;
- }
- }
-
- return 0;
-fail:
- failed_tests++;
- return -1;
-}
-
-static void __init selftest(void)
-{
- /*
- * tc() uses a destination buffer of size 6 and needs at
- * least 2 characters spare (one for null and one to check for
- * overflow). This means we should only call tc() with
- * strings up to a maximum of 4 characters long and 'count'
- * should not exceed 4. To test with longer strings increase
- * the buffer size in tc().
- */
-
- /* tc(src, count, expected, chars, terminator, pad) */
- KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
- KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));
-
- KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
- KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));
-
- KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
- KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
- KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));
-
- KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
- KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
- KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
- KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));
-
- KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
- KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
- KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
- KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
- KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
-}
-
-KSTM_MODULE_LOADERS(test_strscpy);
-MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 566dad3f4196..9321d850931f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -1,22 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
* proc sysctl test driver
*
* Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or at your option any
- * later version; or, when distributed separately from the Linux kernel or
- * when incorporated into other software packages, subject to the following
- * license:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of copyleft-next (version 0.3.1 or later) as published
- * at http://copyleft-next.org/.
*/
/*
- * This module provides an interface to the the proc sysctl interfaces. This
+ * This module provides an interface to the proc sysctl interfaces. This
* driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
* system unless explicitly requested by name. You can also build this driver
* into your kernel.
@@ -38,12 +28,24 @@
static int i_zero;
static int i_one_hundred = 100;
+static int match_int_ok = 1;
+
+
+static struct {
+ struct ctl_table_header *test_h_setup_node;
+ struct ctl_table_header *test_h_mnt;
+ struct ctl_table_header *test_h_mnterror;
+ struct ctl_table_header *empty_add;
+ struct ctl_table_header *empty;
+} sysctl_test_headers;
struct test_sysctl_data {
int int_0001;
int int_0002;
int int_0003[4];
+ int boot_int;
+
unsigned int uint_0001;
char string_0001[65];
@@ -61,6 +63,8 @@ static struct test_sysctl_data test_data = {
.int_0003[2] = 2,
.int_0003[3] = 3,
+ .boot_int = 0,
+
.uint_0001 = 314,
.string_0001 = "(none)",
@@ -92,6 +96,22 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "match_int",
+ .data = &match_int_ok,
+ .maxlen = sizeof(match_int_ok),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "boot_int",
+ .data = &test_data.boot_int,
+ .maxlen = sizeof(test_data.boot_int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "uint_0001",
.data = &test_data.uint_0001,
.maxlen = sizeof(unsigned int),
@@ -112,50 +132,149 @@ static struct ctl_table test_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
- { }
};
-static struct ctl_table test_sysctl_table[] = {
- {
- .procname = "test_sysctl",
- .maxlen = 0,
- .mode = 0555,
- .child = test_table,
- },
- { }
-};
+static void test_sysctl_calc_match_int_ok(void)
+{
+ int i;
-static struct ctl_table test_sysctl_root_table[] = {
- {
- .procname = "debug",
- .maxlen = 0,
- .mode = 0555,
- .child = test_sysctl_table,
- },
- { }
-};
+ struct {
+ int defined;
+ int wanted;
+ } match_int[] = {
+ {.defined = *(int *)SYSCTL_ZERO, .wanted = 0},
+ {.defined = *(int *)SYSCTL_ONE, .wanted = 1},
+ {.defined = *(int *)SYSCTL_TWO, .wanted = 2},
+ {.defined = *(int *)SYSCTL_THREE, .wanted = 3},
+ {.defined = *(int *)SYSCTL_FOUR, .wanted = 4},
+ {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100},
+ {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200},
+ {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000},
+ {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000},
+ {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX},
+ {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535},
+ {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1},
+ };
-static struct ctl_table_header *test_sysctl_header;
+ for (i = 0; i < ARRAY_SIZE(match_int); i++)
+ if (match_int[i].defined != match_int[i].wanted)
+ match_int_ok = 0;
+}
-static int __init test_sysctl_init(void)
+static int test_sysctl_setup_node_tests(void)
{
+ test_sysctl_calc_match_int_ok();
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
- test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
- if (!test_sysctl_header) {
+ sysctl_test_headers.test_h_setup_node = register_sysctl("debug/test_sysctl", test_table);
+ if (!sysctl_test_headers.test_h_setup_node) {
kfree(test_data.bitmap_0001);
return -ENOMEM;
}
+
return 0;
}
-late_initcall(test_sysctl_init);
+
+/* Used to test that unregister actually removes the directory */
+static struct ctl_table test_table_unregister[] = {
+ {
+ .procname = "unregister_error",
+ .data = &test_data.int_0001,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+};
+
+static int test_sysctl_run_unregister_nested(void)
+{
+ struct ctl_table_header *unregister;
+
+ unregister = register_sysctl("debug/test_sysctl/unregister_error",
+ test_table_unregister);
+ if (!unregister)
+ return -ENOMEM;
+
+ unregister_sysctl_table(unregister);
+ return 0;
+}
+
+static int test_sysctl_run_register_mount_point(void)
+{
+ sysctl_test_headers.test_h_mnt
+ = register_sysctl_mount_point("debug/test_sysctl/mnt");
+ if (!sysctl_test_headers.test_h_mnt)
+ return -ENOMEM;
+
+ sysctl_test_headers.test_h_mnterror
+ = register_sysctl("debug/test_sysctl/mnt/mnt_error",
+ test_table_unregister);
+ /*
+	 * Don't check the result:
+	 * If it fails (expected behavior), return 0.
+	 * If it succeeds (misbehavior of register_sysctl_mount_point()), we
+	 * want to see mnt_error when we run the sysctl test script.
+ */
+
+ return 0;
+}
+
+static struct ctl_table test_table_empty[] = { };
+
+static int test_sysctl_run_register_empty(void)
+{
+	/* Test that an empty dir can be created */
+ sysctl_test_headers.empty_add
+ = register_sysctl("debug/test_sysctl/empty_add", test_table_empty);
+ if (!sysctl_test_headers.empty_add)
+ return -ENOMEM;
+
+ /* Test that register on top of an empty dir works */
+ sysctl_test_headers.empty
+ = register_sysctl("debug/test_sysctl/empty_add/empty", test_table_empty);
+ if (!sysctl_test_headers.empty)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __init test_sysctl_init(void)
+{
+ int err;
+
+ err = test_sysctl_setup_node_tests();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_unregister_nested();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_register_mount_point();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_register_empty();
+
+out:
+ return err;
+}
+module_init(test_sysctl_init);
static void __exit test_sysctl_exit(void)
{
kfree(test_data.bitmap_0001);
- if (test_sysctl_header)
- unregister_sysctl_table(test_sysctl_header);
+ if (sysctl_test_headers.test_h_setup_node)
+ unregister_sysctl_table(sysctl_test_headers.test_h_setup_node);
+ if (sysctl_test_headers.test_h_mnt)
+ unregister_sysctl_table(sysctl_test_headers.test_h_mnt);
+ if (sysctl_test_headers.test_h_mnterror)
+ unregister_sysctl_table(sysctl_test_headers.test_h_mnterror);
+ if (sysctl_test_headers.empty)
+ unregister_sysctl_table(sysctl_test_headers.empty);
+ if (sysctl_test_headers.empty_add)
+ unregister_sysctl_table(sysctl_test_headers.empty_add);
}
module_exit(test_sysctl_exit);
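The test_sysctl.c conversion above illustrates the path-based registration API: instead of chaining ctl_table arrays through .child entries, register_sysctl() takes the directory path as a string, and register_sysctl_mount_point() reserves a directory that nothing else may register under. A hedged sketch of the basic pattern in an unrelated module (module and entry names are invented; whether a terminating empty table entry is still needed depends on the kernel version):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(example_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	/* Creates /proc/sys/debug/sysctl_example/example_value in one call. */
	example_header = register_sysctl("debug/sysctl_example", example_table);
	if (!example_header)
		return -ENOMEM;

	return 0;
}

static void __exit example_sysctl_exit(void)
{
	unregister_sysctl_table(example_header);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");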
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 9ea10adf7a66..2062be1f2e80 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -5,79 +5,78 @@
typedef void(*test_ubsan_fp)(void);
-static void test_ubsan_add_overflow(void)
-{
- volatile int val = INT_MAX;
-
- val += 2;
-}
-
-static void test_ubsan_sub_overflow(void)
-{
- volatile int val = INT_MIN;
- volatile int val2 = 2;
-
- val -= val2;
-}
-
-static void test_ubsan_mul_overflow(void)
-{
- volatile int val = INT_MAX / 2;
-
- val *= 3;
-}
-
-static void test_ubsan_negate_overflow(void)
-{
- volatile int val = INT_MIN;
-
- val = -val;
-}
+#define UBSAN_TEST(config, ...) do { \
+ pr_info("%s " __VA_ARGS__ "%s(%s=%s)\n", __func__, \
+ sizeof(" " __VA_ARGS__) > 2 ? " " : "", \
+ #config, IS_ENABLED(config) ? "y" : "n"); \
+ } while (0)
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
volatile int val2 = 0;
+ UBSAN_TEST(CONFIG_UBSAN_DIV_ZERO);
val /= val2;
}
static void test_ubsan_shift_out_of_bounds(void)
{
- volatile int val = -1;
- int val2 = 10;
+ volatile int neg = -1, wrap = 4;
+ int val1 = 10;
+ int val2 = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
+ val1 <<= neg;
- val2 <<= val;
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "left overflow");
+ val2 <<= wrap;
}
static void test_ubsan_out_of_bounds(void)
{
- volatile int i = 4, j = 5;
+ volatile int i = 4, j = 5, k = -1;
+ volatile char above[4] = { }; /* Protect surrounding memory. */
volatile int arr[4];
+ volatile char below[4] = { }; /* Protect surrounding memory. */
+ above[0] = below[0];
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
arr[j] = i;
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
+ arr[k] = i;
}
+enum ubsan_test_enum {
+ UBSAN_TEST_ZERO = 0,
+ UBSAN_TEST_ONE,
+ UBSAN_TEST_MAX,
+};
+
static void test_ubsan_load_invalid_value(void)
{
volatile char *dst, *src;
bool val, val2, *ptr;
- char c = 4;
+ enum ubsan_test_enum eval, eval2, *eptr;
+ unsigned char c = 0xff;
+ UBSAN_TEST(CONFIG_UBSAN_BOOL, "bool");
dst = (char *)&val;
src = &c;
*dst = *src;
ptr = &val2;
val2 = val;
-}
-static void test_ubsan_null_ptr_deref(void)
-{
- volatile int *ptr = NULL;
- int val;
+ UBSAN_TEST(CONFIG_UBSAN_ENUM, "enum");
+ dst = (char *)&eval;
+ src = &c;
+ *dst = *src;
- val = *ptr;
+ eptr = &eval2;
+ eval2 = eval;
}
static void test_ubsan_misaligned_access(void)
@@ -85,32 +84,21 @@ static void test_ubsan_misaligned_access(void)
volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
volatile int *ptr, val = 6;
+ UBSAN_TEST(CONFIG_UBSAN_ALIGNMENT);
ptr = (int *)(arr + 1);
*ptr = val;
}
-static void test_ubsan_object_size_mismatch(void)
-{
- /* "((aligned(8)))" helps this not into be misaligned for ptr-access. */
- volatile int val __aligned(8) = 4;
- volatile long long *ptr, val2;
-
- ptr = (long long *)&val;
- val2 = *ptr;
-}
-
static const test_ubsan_fp test_ubsan_array[] = {
- test_ubsan_add_overflow,
- test_ubsan_sub_overflow,
- test_ubsan_mul_overflow,
- test_ubsan_negate_overflow,
- test_ubsan_divrem_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
- //test_ubsan_null_ptr_deref, /* exclude it because there is a crash */
test_ubsan_misaligned_access,
- test_ubsan_object_size_mismatch,
+};
+
+/* Excluded because they Oops the module. */
+static const test_ubsan_fp skip_ubsan_array[] = {
+ test_ubsan_divrem_overflow,
};
static int __init test_ubsan_init(void)
@@ -120,7 +108,6 @@ static int __init test_ubsan_init(void)
for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
test_ubsan_array[i]();
- (void)test_ubsan_null_ptr_deref; /* to avoid unsed-function warning */
return 0;
}
module_init(test_ubsan_init);
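The new UBSAN_TEST() macro above prints which CONFIG_UBSAN_* option each helper exercises, and relies on string-literal pasting plus sizeof(" " __VA_ARGS__) > 2 to detect whether an optional message was supplied. The trick is independent of the kernel and can be checked with plain printf; a small standalone sketch (IS_ENABLED() is stubbed out, everything here is illustrative):

#include <stdio.h>

/*
 * With no message, " " __VA_ARGS__ is just " " (sizeof == 2), so no extra
 * separator is printed; with a message the pasted literal grows and an
 * extra space is emitted before the config name.
 */
#define UBSAN_TEST(config, ...) do {					\
	printf("%s " __VA_ARGS__ "%s(%s=%s)\n", __func__,		\
	       sizeof(" " __VA_ARGS__) > 2 ? " " : "",			\
	       #config, "y" /* IS_ENABLED(config) in the kernel */);	\
} while (0)

static void demo(void)
{
	UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
	UBSAN_TEST(CONFIG_UBSAN_ALIGNMENT);
}

int main(void)
{
	demo();	/* prints "demo negative exponent (CONFIG_UBSAN_SHIFT=y)" etc. */
	return 0;
}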
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 67bcd5dfd847..5ff04d8fe971 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,14 +31,152 @@
# define TEST_U64
#endif
-#define test(condition, msg) \
-({ \
- int cond = (condition); \
- if (cond) \
- pr_warn("%s\n", msg); \
- cond; \
+#define test(condition, msg, ...) \
+({ \
+ int cond = (condition); \
+ if (cond) \
+ pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \
+ cond; \
})
+static bool is_zeroed(void *from, size_t size)
+{
+ return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+ int ret = 0;
+ size_t start, end, i, zero_start, zero_end;
+
+ if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+ return -EINVAL;
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
+
+ /*
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
+ *
+ * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+ *
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
+ */
+
+ memset(kmem, 0x0, size);
+ for (i = 1; i < zero_start; i += 2)
+ kmem[i] = 0xff;
+ for (i = zero_end; i < size; i += 2)
+ kmem[i] = 0xff;
+
+ ret |= test(copy_to_user(umem, kmem, size),
+ "legitimate copy_to_user failed");
+
+ for (start = 0; start <= size; start++) {
+ for (end = start; end <= size; end++) {
+ size_t len = end - start;
+ int retval = check_zeroed_user(umem + start, len);
+ int expected = is_zeroed(kmem + start, len);
+
+ ret |= test(retval != expected,
+ "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+ retval, expected, start, end);
+ }
+ }
+
+ return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+ size_t size)
+{
+ int ret = 0;
+ char *umem_src = NULL, *expected = NULL;
+ size_t ksize, usize;
+
+ umem_src = kmalloc(size, GFP_KERNEL);
+ ret = test(umem_src == NULL, "kmalloc failed");
+ if (ret)
+ goto out_free;
+
+ expected = kmalloc(size, GFP_KERNEL);
+ ret = test(expected == NULL, "kmalloc failed");
+ if (ret)
+ goto out_free;
+
+ /* Fill umem with a fixed byte pattern. */
+ memset(umem_src, 0x3e, size);
+ ret |= test(copy_to_user(umem, umem_src, size),
+ "legitimate copy_to_user failed");
+
+ /* Check basic case -- (usize == ksize). */
+ ksize = size;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize == ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+ /* Old userspace case -- (usize < ksize). */
+ ksize = size;
+ usize = size / 2;
+
+ memcpy(expected, umem_src, usize);
+ memset(expected + usize, 0x0, ksize - usize);
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize < ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+ /* New userspace (-E2BIG) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+ "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+ /* New userspace (success) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+ ret |= test(clear_user(umem + ksize, usize - ksize),
+ "legitimate clear_user failed");
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize > ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+ kfree(expected);
+ kfree(umem_src);
+ return ret;
+}
+
static int __init test_user_copy_init(void)
{
int ret = 0;
@@ -106,6 +244,11 @@ static int __init test_user_copy_init(void)
#endif
#undef test_legit
+ /* Test usage of check_nonzero_user(). */
+ ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+ /* Test usage of copy_struct_from_user(). */
+ ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
/*
* Invalid usage: none of these copies should succeed.
*/
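test_copy_struct_from_user() covers the three size relationships copy_struct_from_user() was designed around: usize == ksize, an old userspace passing a smaller struct (the kernel tail is zero-filled), and a new userspace passing a larger struct (accepted only if the extra tail is all zeroes, otherwise -E2BIG). A hedged sketch of how a syscall or ioctl handler typically consumes it (the struct and handler are invented for illustration):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical extensible UAPI structure; new fields are only appended. */
struct demo_args {
	__u64 flags;
	__u64 addr;
};

static int demo_handle(const void __user *uargs, size_t usize)
{
	struct demo_args args;
	int ret;

	/*
	 * Copies min(usize, sizeof(args)) bytes, zero-fills the rest of
	 * args, and fails with -E2BIG only if userspace passed non-zero
	 * bytes beyond sizeof(args).
	 */
	ret = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (ret)
		return ret;

	if (args.flags & ~0x1ULL)
		return -EINVAL;

	return 0;
}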
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 8bbefcaddfe8..3718d9886407 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -15,14 +15,16 @@
#include <linux/delay.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
#define __param(type, name, init, msg) \
static type name = init; \
module_param(name, type, 0444); \
MODULE_PARM_DESC(name, msg) \
-__param(bool, single_cpu_test, false,
- "Use single first online CPU to run tests");
+__param(int, nr_threads, 0,
+		"Number of workers to perform tests (min: 1, max: USHRT_MAX)");
__param(bool, sequential_test_order, false,
"Use sequential stress tests order");
@@ -33,27 +35,29 @@ __param(int, test_repeat_count, 1,
__param(int, test_loop_count, 1000000,
"Set test loop counter");
+__param(int, nr_pages, 0,
+		"Set number of pages for fix_size_alloc_test (default: 1)");
+
+__param(bool, use_huge, false,
+ "Use vmalloc_huge in fix_size_alloc_test");
+
__param(int, run_test_mask, INT_MAX,
"Set tests specified in the mask.\n\n"
- "\t\tid: 1, name: fix_size_alloc_test\n"
- "\t\tid: 2, name: full_fit_alloc_test\n"
- "\t\tid: 4, name: long_busy_list_alloc_test\n"
- "\t\tid: 8, name: random_size_alloc_test\n"
- "\t\tid: 16, name: fix_align_alloc_test\n"
- "\t\tid: 32, name: random_size_align_alloc_test\n"
- "\t\tid: 64, name: align_shift_alloc_test\n"
- "\t\tid: 128, name: pcpu_alloc_test\n"
+ "\t\tid: 1, name: fix_size_alloc_test\n"
+ "\t\tid: 2, name: full_fit_alloc_test\n"
+ "\t\tid: 4, name: long_busy_list_alloc_test\n"
+ "\t\tid: 8, name: random_size_alloc_test\n"
+ "\t\tid: 16, name: fix_align_alloc_test\n"
+ "\t\tid: 32, name: random_size_align_alloc_test\n"
+ "\t\tid: 64, name: align_shift_alloc_test\n"
+ "\t\tid: 128, name: pcpu_alloc_test\n"
+ "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
+ "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
+ "\t\tid: 1024, name: vm_map_ram_test\n"
/* Add a new test case description here. */
);
/*
- * Depends on single_cpu_test parameter. If it is true, then
- * use first online CPU to trigger a test on, otherwise go with
- * all online CPUs.
- */
-static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
-
-/*
* Read write semaphore for synchronization of setup
* phase that is done in main thread and workers.
*/
@@ -74,12 +78,13 @@ test_report_one_done(void)
static int random_size_align_alloc_test(void)
{
- unsigned long size, align, rnd;
+ unsigned long size, align;
+ unsigned int rnd;
void *ptr;
int i;
for (i = 0; i < test_loop_count; i++) {
- get_random_bytes(&rnd, sizeof(rnd));
+ rnd = get_random_u8();
/*
* Maximum 1024 pages, if PAGE_SIZE is 4096.
@@ -91,12 +96,8 @@ static int random_size_align_alloc_test(void)
*/
size = ((rnd % 10) + 1) * PAGE_SIZE;
- ptr = __vmalloc_node_range(size, align,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
@@ -118,12 +119,8 @@ static int align_shift_alloc_test(void)
for (i = 0; i < BITS_PER_LONG; i++) {
align = ((unsigned long) 1) << i;
- ptr = __vmalloc_node_range(PAGE_SIZE, align,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
@@ -139,13 +136,9 @@ static int fix_align_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- ptr = __vmalloc_node_range(5 * PAGE_SIZE,
- THREAD_ALIGN << 1,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
+ GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
@@ -162,9 +155,7 @@ static int random_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- get_random_bytes(&n, sizeof(i));
- n = (n % 100) + 1;
-
+ n = get_random_u32_inclusive(1, 100);
p = vmalloc(n * PAGE_SIZE);
if (!p)
@@ -277,7 +268,10 @@ static int fix_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- ptr = vmalloc(3 * PAGE_SIZE);
+ if (use_huge)
+ ptr = vmalloc_huge((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE, GFP_KERNEL);
+ else
+ ptr = vmalloc((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE);
if (!ptr)
return -1;
@@ -304,16 +298,12 @@ pcpu_alloc_test(void)
return -1;
for (i = 0; i < 35000; i++) {
- unsigned int r;
-
- get_random_bytes(&r, sizeof(i));
- size = (r % (PAGE_SIZE / 4)) + 1;
+ size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
/*
* Maximum PAGE_SIZE
*/
- get_random_bytes(&r, sizeof(i));
- align = 1 << ((i % 11) + 1);
+ align = 1 << get_random_u32_inclusive(1, 11);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
@@ -328,6 +318,82 @@ pcpu_alloc_test(void)
return rv;
}
+struct test_kvfree_rcu {
+ struct rcu_head rcu;
+ unsigned char array[20];
+};
+
+static int
+kvfree_rcu_1_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu_mightsleep(p);
+ }
+
+ return 0;
+}
+
+static int
+kvfree_rcu_2_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p, rcu);
+ }
+
+ return 0;
+}
+
+static int
+vm_map_ram_test(void)
+{
+ unsigned long nr_allocated;
+ unsigned int map_nr_pages;
+ unsigned char *v_ptr;
+ struct page **pages;
+ int i;
+
+ map_nr_pages = nr_pages > 0 ? nr_pages:1;
+ pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -1;
+
+ nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+ if (nr_allocated != map_nr_pages)
+ goto cleanup;
+
+ /* Run the test loop. */
+ for (i = 0; i < test_loop_count; i++) {
+ v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
+ *v_ptr = 'a';
+ vm_unmap_ram(v_ptr, map_nr_pages);
+ }
+
+cleanup:
+ for (i = 0; i < nr_allocated; i++)
+ __free_page(pages[i]);
+
+ kfree(pages);
+
+ /* 0 indicates success. */
+ return nr_allocated != map_nr_pages;
+}
+
struct test_case_desc {
const char *test_name;
int (*test_func)(void);
@@ -342,6 +408,9 @@ static struct test_case_desc test_case_array[] = {
{ "random_size_align_alloc_test", random_size_align_alloc_test },
{ "align_shift_alloc_test", align_shift_alloc_test },
{ "pcpu_alloc_test", pcpu_alloc_test },
+ { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
+ { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+ { "vm_map_ram_test", vm_map_ram_test },
/* Add a new test case here. */
};
@@ -351,32 +420,24 @@ struct test_case_data {
u64 time;
};
-/* Split it to get rid of: WARNING: line over 80 characters */
-static struct test_case_data
- per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
-
static struct test_driver {
struct task_struct *task;
+ struct test_case_data data[ARRAY_SIZE(test_case_array)];
+
unsigned long start;
unsigned long stop;
- int cpu;
-} per_cpu_test_driver[NR_CPUS];
+} *tdriver;
static void shuffle_array(int *arr, int n)
{
- unsigned int rnd;
- int i, j, x;
+ int i, j;
for (i = n - 1; i > 0; i--) {
- get_random_bytes(&rnd, sizeof(rnd));
-
/* Cut the range. */
- j = rnd % i;
+ j = get_random_u32_below(i);
/* Swap indexes. */
- x = arr[i];
- arr[i] = arr[j];
- arr[j] = x;
+ swap(arr[i], arr[j]);
}
}
@@ -388,9 +449,6 @@ static int test_func(void *private)
ktime_t kt;
u64 delta;
- if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
- pr_err("Failed to set affinity to %d CPU\n", t->cpu);
-
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
@@ -415,9 +473,9 @@ static int test_func(void *private)
kt = ktime_get();
for (j = 0; j < test_repeat_count; j++) {
if (!test_case_array[index].test_func())
- per_cpu_test_data[t->cpu][index].test_passed++;
+ t->data[index].test_passed++;
else
- per_cpu_test_data[t->cpu][index].test_failed++;
+ t->data[index].test_failed++;
}
/*
@@ -426,7 +484,7 @@ static int test_func(void *private)
delta = (u64) ktime_us_delta(ktime_get(), kt);
do_div(delta, (u32) test_repeat_count);
- per_cpu_test_data[t->cpu][index].time = delta;
+ t->data[index].time = delta;
}
t->stop = get_cycles();
@@ -442,53 +500,56 @@ static int test_func(void *private)
return 0;
}
-static void
+static int
init_test_configurtion(void)
{
/*
- * Reset all data of all CPUs.
+	 * The maximum number of workers is a hard-coded value set to
+	 * USHRT_MAX. Such a generous ceiling is kept on purpose, to leave
+	 * room for potential heavy stress testing.
*/
- memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
- if (single_cpu_test)
- cpumask_set_cpu(cpumask_first(cpu_online_mask),
- &cpus_run_test_mask);
- else
- cpumask_and(&cpus_run_test_mask, cpu_online_mask,
- cpu_online_mask);
+ /* Allocate the space for test instances. */
+ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+ if (tdriver == NULL)
+ return -1;
if (test_repeat_count <= 0)
test_repeat_count = 1;
if (test_loop_count <= 0)
test_loop_count = 1;
+
+ return 0;
}
static void do_concurrent_test(void)
{
- int cpu, ret;
+ int i, ret;
/*
* Set some basic configurations plus sanity check.
*/
- init_test_configurtion();
+ ret = init_test_configurtion();
+ if (ret < 0)
+ return;
/*
* Put on hold all workers.
*/
down_write(&prepare_for_test_rwsem);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
- t->cpu = cpu;
- t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
if (!IS_ERR(t->task))
/* Success. */
atomic_inc(&test_n_undone);
else
- pr_err("Failed to start kthread for %d CPU\n", cpu);
+			pr_err("Failed to start kthread %d\n", i);
}
/*
@@ -506,29 +567,31 @@ static void do_concurrent_test(void)
ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
} while (!ret);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
- int i;
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
+ int j;
if (!IS_ERR(t->task))
kthread_stop(t->task);
- for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
- if (!((run_test_mask & (1 << i)) >> i))
+ for (j = 0; j < ARRAY_SIZE(test_case_array); j++) {
+ if (!((run_test_mask & (1 << j)) >> j))
continue;
pr_info(
"Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
- test_case_array[i].test_name,
- per_cpu_test_data[cpu][i].test_passed,
- per_cpu_test_data[cpu][i].test_failed,
+ test_case_array[j].test_name,
+ t->data[j].test_passed,
+ t->data[j].test_failed,
test_repeat_count, test_loop_count,
- per_cpu_test_data[cpu][i].time);
+ t->data[j].time);
}
- pr_info("All test took CPU%d=%lu cycles\n",
- cpu, t->stop - t->start);
+ pr_info("All test took worker%d=%lu cycles\n",
+ i, t->stop - t->start);
}
+
+ kvfree(tdriver);
}
static int vmalloc_test_init(void)
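The two kvfree_rcu test cases added above exercise both calling conventions: the two-argument form needs a struct rcu_head embedded in the object and never sleeps, while the single-argument form (spelled kvfree_rcu_mightsleep() in current trees) works without an rcu_head but may block. A minimal sketch outside the test harness (the struct is illustrative; both forms accept kmalloc'd or vmalloc'd memory):

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_obj {
	struct rcu_head rcu;
	int payload;
};

static void demo_free_both_ways(void)
{
	struct demo_obj *a = kzalloc(sizeof(*a), GFP_KERNEL);
	struct demo_obj *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!a || !b) {
		kfree(a);
		kfree(b);
		return;
	}

	/* Two-argument form: uses the embedded rcu_head, never sleeps. */
	kvfree_rcu(a, rcu);

	/* Single-argument form: no rcu_head required, but may sleep. */
	kvfree_rcu_mightsleep(b);
}

The new nr_threads, nr_pages and use_huge module parameters, together with the extended run_test_mask bits, select which of these cases run and with how many worker kthreads.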
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d631a7b6a70..e77d4856442c 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -2,6 +2,7 @@
/*
* test_xarray.c: Test the XArray API
* Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -11,6 +12,9 @@
static unsigned int tests_run;
static unsigned int tests_passed;
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
@@ -285,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ XA_STATE(xas, xa, 0x41);
+ void *entry;
+ int count = 0;
+
+ xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+ xa_set_mark(xa, 0x41, XA_MARK_0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+ count++;
+ XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+ }
+ XA_BUG_ON(xa, count != 1);
+ rcu_read_unlock();
+ xa_destroy(xa);
+#endif
+}
+
static noinline void check_xa_mark(struct xarray *xa)
{
unsigned long index;
@@ -293,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
check_xa_mark_1(xa, index);
check_xa_mark_2(xa);
+ check_xa_mark_3(xa);
}
static noinline void check_xa_shrink(struct xarray *xa)
@@ -389,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
xa_erase_index(xa, 12345678);
xa_erase_index(xa, 5);
XA_BUG_ON(xa, !xa_empty(xa));
@@ -902,28 +931,34 @@ static noinline void check_store_iter(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
+ unsigned long multi = 3 << order;
+ unsigned long next = 4 << order;
unsigned long index;
- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
index = 0;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, index != 12);
- index = 13;
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, index != multi);
+ index = multi + 1;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(16));
- XA_BUG_ON(xa, index != 16);
-
- xa_erase_index(xa, 12);
- xa_erase_index(xa, 16);
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
+ xa_erase_index(xa, next + 1);
XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
@@ -952,6 +987,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -1046,13 +1095,35 @@ static noinline void check_find_3(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_find_4(struct xarray *xa)
+{
+ unsigned long index = 0;
+ void *entry;
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry);
+
+ xa_erase_index(xa, ULONG_MAX);
+}
+
static noinline void check_find(struct xarray *xa)
{
+ unsigned i;
+
check_find_1(xa);
check_find_2(xa);
check_find_3(xa);
- check_multi_find(xa);
+ check_find_4(xa);
+
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
check_multi_find_2(xa);
+ check_multi_find_3(xa);
}
/* See find_swap_entry() in mm/shmem.c */
@@ -1110,6 +1181,85 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_move_tiny(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ rcu_read_unlock();
+ xa_store_index(xa, 0, GFP_KERNEL);
+ rcu_read_lock();
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ rcu_read_unlock();
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_max(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ xas_pause(&xas);
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
XA_STATE(xas, xa, 0);
@@ -1217,6 +1367,9 @@ static noinline void check_move(struct xarray *xa)
xa_destroy(xa);
+ check_move_tiny(xa);
+ check_move_max(xa);
+
for (i = 0; i < 16; i++)
check_move_small(xa, 1UL << i);
@@ -1310,6 +1463,25 @@ unlock:
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_create_range_5(struct xarray *xa,
+ unsigned long index, unsigned int order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ for (i = 0; i < order + 10; i++) {
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xa_destroy(xa);
+}
+
static noinline void check_create_range(struct xarray *xa)
{
unsigned int order;
@@ -1337,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
check_create_range_4(xa, (3U << order) + 1, order);
check_create_range_4(xa, (3U << order) - 1, order);
check_create_range_4(xa, (1U << 24) + 1, order);
+
+ check_create_range_5(xa, 0, order);
+ check_create_range_5(xa, (1U << order), order);
}
check_create_range_3();
@@ -1375,6 +1550,51 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+ unsigned int order, unsigned int new_order)
+{
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ xas_lock(&xas);
+ xas_split(&xas, xa, order);
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
+ xas_unlock(&xas);
+
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
+ }
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+ unsigned int order, new_order;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+ for (new_order = 0; new_order < order; new_order++) {
+ check_split_1(xa, 0, order, new_order);
+ check_split_1(xa, 1UL << order, order, new_order);
+ check_split_1(xa, 3UL << order, order, new_order);
+ }
+ }
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
static void check_align_1(struct xarray *xa, char *name)
{
int i;
@@ -1447,14 +1667,9 @@ static noinline void shadow_remove(struct xarray *xa)
xa_lock(xa);
while ((node = list_first_entry_or_null(&shadow_nodes,
struct xa_node, private_list))) {
- XA_STATE(xas, node->array, 0);
XA_BUG_ON(xa, node->array != xa);
list_del_init(&node->private_list);
- xas.xa_node = xa_parent_locked(node->array, node);
- xas.xa_offset = node->offset;
- xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
- xas_set_update(&xas, test_update_node);
- xas_store(&xas, NULL);
+ xa_delete_node(node, test_update_node);
}
xa_unlock(xa);
}
@@ -1521,6 +1736,26 @@ static noinline void check_account(struct xarray *xa)
#endif
}
+static noinline void check_get_order(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (i = 0; i < 3; i++)
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xa_store_order(xa, i << order, order,
+ xa_mk_index(i << order), GFP_KERNEL);
+ for (j = i << order; j < (i + 1) << order; j++)
+ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+ xa_erase(xa, i << order);
+ }
+ }
+}
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -1569,9 +1804,11 @@ static int xarray_checks(void)
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
+ check_pause(&array);
check_account(&array);
check_destroy(&array);
check_move(&array);
@@ -1579,6 +1816,7 @@ static int xarray_checks(void)
check_store_range(&array);
check_store_iter(&array);
check_align(&xa0);
+ check_split(&array);
check_workingset(&array, 0);
check_workingset(&array, 64);
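check_find_4() and check_xa_mark_3() above pin down xa_find_after() at the top of the index space and iteration over marked multi-order entries. A rough sketch of the corresponding public XArray calls, using only xa_mk_value() and the core helpers (indices are arbitrary):

#include <linux/xarray.h>
#include <linux/printk.h>

static DEFINE_XARRAY(demo_xa);

static void demo_xarray_walk(void)
{
	unsigned long index = 0;
	void *entry;

	xa_store(&demo_xa, 5, xa_mk_value(5), GFP_KERNEL);
	xa_store(&demo_xa, 64, xa_mk_value(64), GFP_KERNEL);
	xa_set_mark(&demo_xa, 64, XA_MARK_0);

	/* Visit only the entries carrying XA_MARK_0. */
	xa_for_each_marked(&demo_xa, index, entry, XA_MARK_0)
		pr_info("marked entry at index %lu\n", index);

	/* xa_find_after() starts strictly after *index. */
	index = 5;
	entry = xa_find_after(&demo_xa, &index, ULONG_MAX, XA_PRESENT);
	/* Here entry == xa_mk_value(64) and index has advanced to 64. */

	xa_destroy(&demo_xa);
}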
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 4f16eec5d554..f68dea8806be 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -89,9 +89,9 @@
* goto errout;
* }
*
- * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
* if (pos != UINT_MAX)
- * panic("Oh my god, dancing chickens at \%d\n", pos);
+ * panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
*/
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index bc7e64df27df..cdb9c7658478 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -14,6 +14,14 @@
#include <linux/rbtree.h>
#include <linux/export.h>
+#define __node_2_tq(_n) \
+ rb_entry((_n), struct timerqueue_node, node)
+
+static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
+{
+ return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
+}
+
/**
* timerqueue_add - Adds timer to timerqueue.
*
@@ -26,29 +34,10 @@
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
- struct rb_node **p = &head->head.rb_node;
- struct rb_node *parent = NULL;
- struct timerqueue_node *ptr;
-
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
- while (*p) {
- parent = *p;
- ptr = rb_entry(parent, struct timerqueue_node, node);
- if (node->expires < ptr->expires)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&node->node, parent, p);
- rb_insert_color(&node->node, &head->head);
-
- if (!head->next || node->expires < head->next->expires) {
- head->next = node;
- return true;
- }
- return false;
+ return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
}
EXPORT_SYMBOL_GPL(timerqueue_add);
@@ -65,15 +54,10 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
- /* update next pointer */
- if (head->next == node) {
- struct rb_node *rbn = rb_next(&node->node);
-
- head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
- }
- rb_erase(&node->node, &head->head);
+ rb_erase_cached(&node->node, &head->rb_root);
RB_CLEAR_NODE(&node->node);
- return head->next != NULL;
+
+ return !RB_EMPTY_ROOT(&head->rb_root.rb_root);
}
EXPORT_SYMBOL_GPL(timerqueue_del);
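The timerqueue rewrite above is the generic rb_add_cached()/rb_erase_cached() pattern: supply a less() comparator and the cached rbtree keeps the leftmost (smallest-key) node available in O(1). The same shape works for any structure ordered by a scalar; a hedged sketch with invented names (the helpers' exact return types have varied slightly across kernel versions):

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/container_of.h>

struct demo_event {
	struct rb_node node;
	u64 deadline;
};

static struct rb_root_cached demo_tree = RB_ROOT_CACHED;

static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return container_of(a, struct demo_event, node)->deadline <
	       container_of(b, struct demo_event, node)->deadline;
}

static void demo_event_add(struct demo_event *ev)
{
	/* The tree caches the leftmost node, i.e. the earliest deadline. */
	rb_add_cached(&ev->node, &demo_tree, demo_less);
}

static struct demo_event *demo_event_peek(void)
{
	struct rb_node *leftmost = rb_first_cached(&demo_tree);

	return leftmost ? container_of(leftmost, struct demo_event, node) : NULL;
}

static void demo_event_del(struct demo_event *ev)
{
	rb_erase_cached(&ev->node, &demo_tree);
}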
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
new file mode 100644
index 000000000000..a94cd56a1e4c
--- /dev/null
+++ b/lib/trace_readwrite.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Register read and write tracepoints
+ *
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rwmmio.h>
+
+#ifdef CONFIG_TRACE_MMIO_ACCESS
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_write(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
+
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_post_write(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
+
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_read(caller_addr, caller_addr0, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
+
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_post_read(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read);
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index b352903c50e3..e5f30f9177df 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -11,7 +11,7 @@
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
- * http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ * https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
@@ -52,37 +52,56 @@ struct ts_bm
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
- unsigned int good_shift[0];
+ unsigned int good_shift[];
};
+static unsigned int matchpat(const u8 *pattern, unsigned int patlen,
+ const u8 *text, bool icase)
+{
+ unsigned int i;
+
+ for (i = 0; i < patlen; i++) {
+ u8 t = *(text-i);
+
+ if (icase)
+ t = toupper(t);
+
+ if (t != *(pattern-i))
+ break;
+ }
+
+ return i;
+}
+
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen - 1, bs;
+ int bs;
const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
+ int shift = bm->patlen - 1;
+
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
break;
while (shift < text_len) {
- DEBUGP("Searching in position %d (%c)\n",
- shift, text[shift]);
- for (i = 0; i < bm->patlen; i++)
- if ((icase ? toupper(text[shift-i])
- : text[shift-i])
- != bm->pattern[bm->patlen-1-i])
- goto next;
-
- /* London calling... */
- DEBUGP("found!\n");
- return consumed += (shift-(bm->patlen-1));
-
-next: bs = bm->bad_shift[text[shift-i]];
+ DEBUGP("Searching in position %d (%c)\n",
+ shift, text[shift]);
+
+ i = matchpat(&bm->pattern[bm->patlen-1], bm->patlen,
+ &text[shift], icase);
+ if (i == bm->patlen) {
+ /* London calling... */
+ DEBUGP("found!\n");
+ return consumed + (shift-(bm->patlen-1));
+ }
+
+ bs = bm->bad_shift[text[shift-i]];
/* Now jumping to... */
shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
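matchpat() factors the backwards comparison out of bm_find(): both pointers address the last byte of their windows and the helper reports how many bytes matched walking towards the start, which is exactly the count the bad/good shift computation needs. The helper is self-contained enough to try in userspace; a small sketch (the strings are arbitrary):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned char u8;

/* Count matching bytes, comparing backwards from 'text' and 'pattern'. */
static unsigned int matchpat(const u8 *pattern, unsigned int patlen,
			     const u8 *text, bool icase)
{
	unsigned int i;

	for (i = 0; i < patlen; i++) {
		u8 t = *(text - i);

		if (icase)
			t = toupper(t);

		if (t != *(pattern - i))
			break;
	}

	return i;
}

int main(void)
{
	const u8 *text = (const u8 *)"say hello world";
	const u8 *pat = (const u8 *)"hello";
	unsigned int patlen = 5;

	/* Align the last bytes: text[8] is the 'o' of "hello". */
	unsigned int n = matchpat(&pat[patlen - 1], patlen, &text[8], false);

	printf("%u of %u bytes matched\n", n, patlen); /* prints 5 of 5 */
	return 0;
}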
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 9c873cadab7c..64fd9015ad80 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -32,7 +32,7 @@
struct ts_fsm
{
unsigned int ntokens;
- struct ts_fsm_token tokens[0];
+ struct ts_fsm_token tokens[];
};
/* other values derived from ctype.h */
@@ -193,7 +193,7 @@ startover:
TOKEN_MISMATCH();
block_idx++;
- /* fall through */
+ fallthrough;
case TS_FSM_ANY:
if (next == NULL)
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 94617e014b3a..c77a3d537f24 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -36,7 +36,7 @@ struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
- unsigned int prefix_tbl[0];
+ unsigned int prefix_tbl[];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e7d31735950d..df4f8d1354bb 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -14,10 +14,77 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/ubsan.h>
+#include <kunit/test-bug.h>
#include "ubsan.h"
-const char *type_check_kinds[] = {
+#ifdef CONFIG_UBSAN_TRAP
+/*
+ * Only include matches for UBSAN checks that are actually compiled in.
+ * The mappings of struct SanitizerKind (the -fsanitize=xxx args) to
+ * enum SanitizerHandler (the traps) in Clang is in clang/lib/CodeGen/.
+ */
+const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+{
+ switch (check_type) {
+#ifdef CONFIG_UBSAN_BOUNDS
+ /*
+ * SanitizerKind::ArrayBounds and SanitizerKind::LocalBounds
+ * emit SanitizerHandler::OutOfBounds.
+ */
+ case ubsan_out_of_bounds:
+ return "UBSAN: array index out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_SHIFT
+ /*
+ * SanitizerKind::ShiftBase and SanitizerKind::ShiftExponent
+ * emit SanitizerHandler::ShiftOutOfBounds.
+ */
+ case ubsan_shift_out_of_bounds:
+ return "UBSAN: shift out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_DIV_ZERO
+ /*
+ * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerHandler::DivremOverflow.
+ */
+ case ubsan_divrem_overflow:
+ return "UBSAN: divide/remainder overflow";
+#endif
+#ifdef CONFIG_UBSAN_UNREACHABLE
+ /*
+ * SanitizerKind::Unreachable emits
+ * SanitizerHandler::BuiltinUnreachable.
+ */
+ case ubsan_builtin_unreachable:
+ return "UBSAN: unreachable code";
+#endif
+#if defined(CONFIG_UBSAN_BOOL) || defined(CONFIG_UBSAN_ENUM)
+ /*
+ * SanitizerKind::Bool and SanitizerKind::Enum emit
+ * SanitizerHandler::LoadInvalidValue.
+ */
+ case ubsan_load_invalid_value:
+ return "UBSAN: loading invalid value";
+#endif
+#ifdef CONFIG_UBSAN_ALIGNMENT
+ /*
+ * SanitizerKind::Alignment emits SanitizerHandler::TypeMismatch
+ * or SanitizerHandler::AlignmentAssumption.
+ */
+ case ubsan_alignment_assumption:
+ return "UBSAN: alignment assumption";
+ case ubsan_type_mismatch:
+ return "UBSAN: type mismatch";
+#endif
+ default:
+ return "UBSAN: unrecognized failure code";
+ }
+}
+
+#else
+static const char * const type_check_kinds[] = {
"load of",
"store to",
"reference binding to",
@@ -45,13 +112,6 @@ static bool was_reported(struct source_location *location)
return test_and_set_bit(REPORTED_BIT, &location->reported);
}
-static void print_source_location(const char *prefix,
- struct source_location *loc)
-{
- pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
- loc->line & LINE_MASK, loc->column & COLUMN_MASK);
-}
-
static bool suppress_report(struct source_location *loc)
{
return current->in_ubsan || was_reported(loc);
@@ -119,7 +179,7 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
u_max val = get_unsigned_val(type, value);
scnprintf(str, size, "0x%08x%08x%08x%08x",
@@ -140,108 +200,37 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static DEFINE_SPINLOCK(report_lock);
-
-static void ubsan_prologue(struct source_location *location,
- unsigned long *flags)
+static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
- spin_lock_irqsave(&report_lock, *flags);
- pr_err("========================================"
- "========================================\n");
- print_source_location("UBSAN: Undefined behaviour in", location);
-}
-
-static void ubsan_epilogue(unsigned long *flags)
-{
- dump_stack();
- pr_err("========================================"
- "========================================\n");
- spin_unlock_irqrestore(&report_lock, *flags);
- current->in_ubsan--;
-}
-
-static void handle_overflow(struct overflow_data *data, void *lhs,
- void *rhs, char op)
-{
-
- struct type_descriptor *type = data->type;
- unsigned long flags;
- char lhs_val_str[VALUE_LENGTH];
- char rhs_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, &flags);
+ pr_warn(CUT_HERE);
- val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
- val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s integer overflow:\n",
- type_is_signed(type) ? "signed" : "unsigned");
- pr_err("%s %c %s cannot be represented in type %s\n",
- lhs_val_str,
- op,
- rhs_val_str,
- type->type_name);
-
- ubsan_epilogue(&flags);
-}
-
-void __ubsan_handle_add_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
-{
-
- handle_overflow(data, lhs, rhs, '+');
-}
-EXPORT_SYMBOL(__ubsan_handle_add_overflow);
-
-void __ubsan_handle_sub_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '-');
-}
-EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
+ pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
+ loc->line & LINE_MASK, loc->column & COLUMN_MASK);
-void __ubsan_handle_mul_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '*');
+ kunit_fail_current_test("%s in %s", reason, loc->file_name);
}
-EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
-void __ubsan_handle_negate_overflow(struct overflow_data *data,
- void *old_val)
+static void ubsan_epilogue(void)
{
- unsigned long flags;
- char old_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, &flags);
-
- val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
+ dump_stack();
+ pr_warn("---[ end trace ]---\n");
- pr_err("negation of %s cannot be represented in type %s:\n",
- old_val_str, data->type->type_name);
+ current->in_ubsan--;
- ubsan_epilogue(&flags);
+ check_panic_on_warn("UBSAN");
}
-EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
-
-void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
- unsigned long flags;
+ struct overflow_data *data = _data;
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location, "division-overflow");
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -251,58 +240,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
else
pr_err("division by zero\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
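A minimal sketch of how a report now reads with the reworked prologue/epilogue (file, line and values are made up, not verbatim output): a division by zero caught by the handler above would emit roughly

	------------[ cut here ]------------
	UBSAN: division-overflow in drivers/foo/bar.c:42:9
	division by zero
	<stack dump from dump_stack()>
	---[ end trace ]---

i.e. the "reason" string passed to ubsan_prologue() becomes part of the one-line summary, and the epilogue closes the report and honours panic_on_warn.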
static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location, "null-ptr-deref");
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_misaligned_access(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location, "misaligned-access");
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
(void *)ptr, data->type->type_name);
pr_err("which requires %ld byte alignment\n", data->alignment);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location, "object-size-mismatch");
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
pr_err("for an object of type %s\n", data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
@@ -334,10 +317,9 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
-void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- void *ptr)
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr)
{
-
+ struct type_mismatch_data_v1 *data = _data;
struct type_mismatch_data_common common_data = {
.location = &data->location,
.type = data->type,
@@ -349,36 +331,36 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
+void __ubsan_handle_out_of_bounds(void *_data, void *index)
{
- unsigned long flags;
+ struct out_of_bounds_data *data = _data;
char index_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location, "array-index-out-of-bounds");
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
data->array_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
-void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs)
{
- unsigned long flags;
+ struct shift_out_of_bounds_data *data = _data;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
char lhs_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location, "shift-out-of-bounds");
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -401,38 +383,71 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
lhs_str, rhs_str,
lhs_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
-void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(void *_data)
{
- unsigned long flags;
-
- ubsan_prologue(&data->location, &flags);
+ struct unreachable_data *data = _data;
+ ubsan_prologue(&data->location, "unreachable");
pr_err("calling __builtin_unreachable()\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
-void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- void *val)
+void __ubsan_handle_load_invalid_value(void *_data, void *val)
{
- unsigned long flags;
+ struct invalid_value_data *data = _data;
char val_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location, "invalid-load");
val_to_string(val_str, sizeof(val_str), data->type, val);
pr_err("load of value %s is not a valid value for type %s\n",
val_str, data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
+
+#endif /* !CONFIG_UBSAN_TRAP */
diff --git a/lib/ubsan.h b/lib/ubsan.h
index b8fa83864467..5d99ab81913b 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -2,6 +2,38 @@
#ifndef _LIB_UBSAN_H
#define _LIB_UBSAN_H
+/*
+ * ABI defined by Clang's UBSAN enum SanitizerHandler:
+ * https://github.com/llvm/llvm-project/blob/release/16.x/clang/lib/CodeGen/CodeGenFunction.h#L113
+ */
+enum ubsan_checks {
+ ubsan_add_overflow,
+ ubsan_builtin_unreachable,
+ ubsan_cfi_check_fail,
+ ubsan_divrem_overflow,
+ ubsan_dynamic_type_cache_miss,
+ ubsan_float_cast_overflow,
+ ubsan_function_type_mismatch,
+ ubsan_implicit_conversion,
+ ubsan_invalid_builtin,
+ ubsan_invalid_objc_cast,
+ ubsan_load_invalid_value,
+ ubsan_missing_return,
+ ubsan_mul_overflow,
+ ubsan_negate_overflow,
+ ubsan_nullability_arg,
+ ubsan_nullability_return,
+ ubsan_nonnull_arg,
+ ubsan_nonnull_return,
+ ubsan_out_of_bounds,
+ ubsan_pointer_overflow,
+ ubsan_shift_out_of_bounds,
+ ubsan_sub_overflow,
+ ubsan_type_mismatch,
+ ubsan_alignment_assumption,
+ ubsan_vla_bound_not_positive,
+};
+
enum {
type_kind_int = 0,
type_kind_float = 1,
@@ -78,7 +110,13 @@ struct invalid_value_data {
struct type_descriptor *type;
};
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
#else
@@ -86,4 +124,15 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void __ubsan_handle_out_of_bounds(void *_data, void *index);
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void __ubsan_handle_builtin_unreachable(void *_data);
+void __ubsan_handle_load_invalid_value(void *_data, void *val);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+
#endif
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 0a559a42359b..9308bcfb2ad5 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -32,6 +32,58 @@ ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
}
EXPORT_SYMBOL(ucs2_strsize);
+/**
+ * ucs2_strscpy() - Copy a UCS2 string into a sized buffer.
+ *
+ * @dst: Pointer to the destination buffer to copy the string to.
+ * @src: Pointer to the source buffer to copy the string from.
+ * @count: Size of the destination buffer, in UCS2 (16-bit) characters.
+ *
+ * Like strscpy(), only for UCS2 strings.
+ *
+ * Copy the source string @src, or as much of it as fits, into the destination
+ * buffer @dst. The behavior is undefined if the string buffers overlap. The
+ * destination buffer @dst is always NUL-terminated, unless it's zero-sized.
+ *
+ * Return: The number of characters copied into @dst (excluding the trailing
+ * %NUL terminator) or -E2BIG if @count is 0 or @src was truncated due to the
+ * destination buffer being too small.
+ */
+ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count)
+{
+ long res;
+
+ /*
+ * Ensure that we have a valid amount of space. We need to store at
+ * least one NUL-character.
+ */
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX / sizeof(*dst)))
+ return -E2BIG;
+
+ /*
+ * Copy at most 'count' characters, return early if we find a
+ * NUL-terminator.
+ */
+ for (res = 0; res < count; res++) {
+ ucs2_char_t c;
+
+ c = src[res];
+ dst[res] = c;
+
+ if (!c)
+ return res;
+ }
+
+ /*
+	 * The loop above terminated without finding a NUL terminator within
+	 * 'count' characters: enforce proper NUL termination and return an
+	 * error.
+ */
+ dst[count - 1] = 0;
+ return -E2BIG;
+}
+EXPORT_SYMBOL(ucs2_strscpy);
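A minimal usage sketch; the caller, source string and buffer size are hypothetical. Note that @count is in UCS-2 characters, so ARRAY_SIZE() rather than sizeof() is the right argument:

	/* Copy a UCS-2 name (e.g. an EFI variable name) into a fixed buffer. */
	ucs2_char_t name[32];
	ssize_t len;

	len = ucs2_strscpy(name, src_name, ARRAY_SIZE(name));
	if (len == -E2BIG)
		return -ENAMETOOLONG;	/* truncated, but name[] is still NUL-terminated */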
+
int
ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
{
diff --git a/lib/usercopy.c b/lib/usercopy.c
index c2bfbcaeb3dc..d29fe29c6849 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
+#include <linux/fault-inject-usercopy.h>
+#include <linux/instrumented.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
/* out-of-line parts */
@@ -8,9 +12,16 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -23,11 +34,67 @@ EXPORT_SYMBOL(_copy_from_user);
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (likely(access_ok(to, n))) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
}
EXPORT_SYMBOL(_copy_to_user);
#endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ * * 0: There were non-zero bytes present in the buffer.
+ * * 1: The buffer was full of zero bytes.
+ * * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+ unsigned long val;
+ uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+ if (unlikely(size == 0))
+ return 1;
+
+ from -= align;
+ size += align;
+
+ if (!user_read_access_begin(from, size))
+ return -EFAULT;
+
+ unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+ if (align)
+ val &= ~aligned_byte_mask(align);
+
+ while (size > sizeof(unsigned long)) {
+ if (unlikely(val))
+ goto done;
+
+ from += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+
+ unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+ }
+
+ if (size < sizeof(unsigned long))
+ val &= aligned_byte_mask(size);
+
+done:
+ user_read_access_end();
+ return (val == 0);
+err_fault:
+ user_read_access_end();
+ return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
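A hedged sketch of the usual pattern this helper supports, e.g. forward-compatible (extensible) syscall structs; uarg/karg/usize are hypothetical names:

	/* Accept a larger-than-known struct only if the unknown tail is zero. */
	if (usize > sizeof(karg)) {
		int ret = check_zeroed_user(uarg + sizeof(karg), usize - sizeof(karg));

		if (ret < 0)
			return ret;	/* -EFAULT */
		if (ret == 0)
			return -E2BIG;	/* caller set bits we do not understand */
	}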
diff --git a/lib/uuid.c b/lib/uuid.c
index b6a1edb61d87..e309b4c5be3d 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -40,9 +40,19 @@ void generate_random_uuid(unsigned char uuid[16])
}
EXPORT_SYMBOL(generate_random_uuid);
+void generate_random_guid(unsigned char guid[16])
+{
+ get_random_bytes(guid, 16);
+ /* Set GUID version to 4 --- truly random generation */
+ guid[7] = (guid[7] & 0x0F) | 0x40;
+ /* Set the GUID variant to DCE */
+ guid[8] = (guid[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_guid);
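The only difference from generate_random_uuid() above is where the version nibble lands: big-endian UUIDs carry it in byte 6, little-endian GUIDs in byte 7. A small sketch:

	/* Hypothetical: one fresh identifier in each representation. */
	uuid_t uu;	/* big-endian, version nibble ends up in uu.b[6] */
	guid_t gu;	/* little-endian, version nibble ends up in gu.b[7] */

	generate_random_uuid(uu.b);
	generate_random_guid(gu.b);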
+
static void __uuid_gen_common(__u8 b[16])
{
- prandom_bytes(b, 16);
+ get_random_bytes(b, 16);
	/* variant 0b10 (RFC 4122) */
b[8] = (b[8] & 0x3F) | 0x80;
}
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
new file mode 100644
index 000000000000..d883ac299508
--- /dev/null
+++ b/lib/vdso/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config HAVE_GENERIC_VDSO
+ bool
+
+if HAVE_GENERIC_VDSO
+
+config GENERIC_GETTIMEOFDAY
+ bool
+ help
+	  This is a generic implementation of the gettimeofday vdso.
+ Each architecture that enables this feature has to
+ provide the fallback implementation.
+
+config GENERIC_VDSO_32
+ bool
+ depends on GENERIC_GETTIMEOFDAY && !64BIT
+ help
+	  This config option helps to avoid possible performance issues
+	  on 32-bit-only architectures.
+
+config GENERIC_COMPAT_VDSO
+ bool
+ help
+ This config option enables the compat VDSO layer.
+
+config GENERIC_VDSO_TIME_NS
+ bool
+ help
+ Selected by architectures which support time namespaces in the
+	  VDSO.
+
+endif
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
new file mode 100644
index 000000000000..9f031eafc465
--- /dev/null
+++ b/lib/vdso/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
+
+c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+
+# This cmd checks that the vdso library does not contain dynamic relocations.
+# It has to be called after the linking of the vdso library and requires it
+# as a parameter.
+#
+# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
+# dynamic relocations, ignore R_*_NONE.
+quiet_cmd_vdso_check = VDSOCHK $@
+ cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
+ then (echo >&2 "$@: dynamic relocations are not supported"; \
+ rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
new file mode 100644
index 000000000000..ce2f69552003
--- /dev/null
+++ b/lib/vdso/gettimeofday.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic userspace implementations of gettimeofday() and similar.
+ */
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+
+#ifndef vdso_calc_delta
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline
+u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+{
+ return ((cycles - last) & mask) * mult;
+}
+#endif
+
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
+#endif
+
+#ifndef __arch_vdso_hres_capable
+static inline bool __arch_vdso_hres_capable(void)
+{
+ return true;
+}
+#endif
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
+
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+ return true;
+}
+#endif
+
+#ifdef CONFIG_TIME_NS
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_data *vd;
+ const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_timestamp *vdso_ts;
+ u64 cycles, last, ns;
+ u32 seq;
+ s64 sec;
+
+ vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
+ vd = __arch_get_timens_vdso_data(vd);
+ if (clk != CLOCK_MONOTONIC_RAW)
+ vd = &vd[CS_HRES_COARSE];
+ else
+ vd = &vd[CS_RAW];
+ vdso_ts = &vd->basetime[clk];
+
+ do {
+ seq = vdso_read_begin(vd);
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
+ cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
+ ns = vdso_ts->nsec;
+ last = vd->cycle_last;
+ ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+ ns = vdso_shift_ns(ns, vd->shift);
+ sec = vdso_ts->sec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /* Add the namespace offset */
+ sec += offs->sec;
+ ns += offs->nsec;
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+#else
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return NULL;
+}
+
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ return -EINVAL;
+}
+#endif
+
+static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u64 cycles, last, sec, ns;
+ u32 seq;
+
+	/* Allows the high resolution parts to be compiled out */
+ if (!__arch_vdso_hres_capable())
+ return -1;
+
+ do {
+ /*
+		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Tasks in a time
+		 * namespace have a special VVAR page installed which has
+		 * vd->seq set to 1 and vd->clock_mode set to
+		 * VDSO_CLOCKMODE_TIMENS. For tasks not affected by a time
+		 * namespace this does not hurt performance: if vd->seq is
+		 * odd, i.e. a concurrent update is in progress, the extra
+		 * check for vd->clock_mode is just a few extra
+		 * instructions while spin waiting for vd->seq to become
+		 * even again.
+ */
+ while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_hres_timens(vd, clk, ts);
+ cpu_relax();
+ }
+ smp_rmb();
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
+ cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
+ ns = vdso_ts->nsec;
+ last = vd->cycle_last;
+ ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+ ns = vdso_shift_ns(ns, vd->shift);
+ sec = vdso_ts->sec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
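A worked sketch of the conversion inside the loop above, with made-up clocksource parameters:

	/*
	 * Assume a 1 GHz counter with mult = 1 << 22 and shift = 22, so
	 * mult / 2^shift == 1 ns per cycle:
	 *
	 *	delta = (cycles - last) & mask;		e.g. 1000 cycles
	 *	ns   += delta * mult;			1000 << 22
	 *	ns    = ns >> shift;			1000 ns
	 *
	 * i.e. vdso_calc_delta()/vdso_shift_ns() scale raw counter ticks
	 * into nanoseconds without a division.
	 */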
+
+#ifdef CONFIG_TIME_NS
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct timens_offset *offs = &vdns->offset[clk];
+ u64 nsec;
+ s64 sec;
+ s32 seq;
+
+ do {
+ seq = vdso_read_begin(vd);
+ sec = vdso_ts->sec;
+ nsec = vdso_ts->nsec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /* Add the namespace offset */
+ sec += offs->sec;
+ nsec += offs->nsec;
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
+ ts->tv_nsec = nsec;
+ return 0;
+}
+#else
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ return -1;
+}
+#endif
+
+static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u32 seq;
+
+ do {
+ /*
+		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
+ * do_hres().
+ */
+ while ((seq = READ_ONCE(vd->seq)) & 1) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_coarse_timens(vd, clk, ts);
+ cpu_relax();
+ }
+ smp_rmb();
+
+ ts->tv_sec = vdso_ts->sec;
+ ts->tv_nsec = vdso_ts->nsec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ return 0;
+}
+
+static __always_inline int
+__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ u32 msk;
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ return -1;
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (likely(msk & VDSO_HRES))
+ vd = &vd[CS_HRES_COARSE];
+ else if (msk & VDSO_COARSE)
+ return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ else if (msk & VDSO_RAW)
+ vd = &vd[CS_RAW];
+ else
+ return -1;
+
+ return do_hres(vd, clock, ts);
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(vd, clock, ts);
+
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+}
+
+#ifdef BUILD_VDSO32
+static __maybe_unused int
+__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ ret = __cvdso_clock_gettime_common(vd, clock, &ts);
+
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+
+ /* For ret == 0 */
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+
+ return ret;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+}
+#endif /* BUILD_VDSO32 */
+
+static __maybe_unused int
+__cvdso_gettimeofday_data(const struct vdso_data *vd,
+ struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+
+ if (likely(tv != NULL)) {
+ struct __kernel_timespec ts;
+
+ if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ return gettimeofday_fallback(tv, tz);
+
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
+ }
+
+ if (unlikely(tz != NULL)) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
+ tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
+ }
+
+ return 0;
+}
+
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+}
+
+#ifdef VDSO_HAS_TIME
+static __maybe_unused __kernel_old_time_t
+__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
+{
+ __kernel_old_time_t t;
+
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+
+ if (time)
+ *time = t;
+
+ return t;
+}
+
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+{
+ return __cvdso_time_data(__arch_get_vdso_data(), time);
+}
+#endif /* VDSO_HAS_TIME */
+
+#ifdef VDSO_HAS_CLOCK_GETRES
+static __maybe_unused
+int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
+{
+ u32 msk;
+ u64 ns;
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ return -1;
+
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (msk & (VDSO_HRES | VDSO_RAW)) {
+ /*
+ * Preserves the behaviour of posix_get_hrtimer_res().
+ */
+ ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ } else if (msk & VDSO_COARSE) {
+ /*
+ * Preserves the behaviour of posix_get_coarse_res().
+ */
+ ns = LOW_RES_NSEC;
+ } else {
+ return -1;
+ }
+
+ if (likely(res)) {
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
+ }
+ return 0;
+}
+
+static __maybe_unused
+int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(vd, clock, res);
+
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
+}
+
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+}
+
+#ifdef BUILD_VDSO32
+static __maybe_unused int
+__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ ret = __cvdso_clock_getres_common(vd, clock, &ts);
+
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+
+ if (likely(res)) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
+ return ret;
+}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ clock, res);
+}
+#endif /* BUILD_VDSO32 */
+#endif /* VDSO_HAS_CLOCK_GETRES */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 63937044c57d..552738f14275 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,10 +17,11 @@
* - scnprintf and vscnprintf
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/errname.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
@@ -33,11 +34,15 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
+#include <linux/sprintf.h>
+#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
+#include <linux/property.h>
+#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -46,32 +51,52 @@
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/byteorder.h> /* cpu_to_le16 */
+#include <asm/unaligned.h>
#include <linux/string_helpers.h>
#include "kstrtox.h"
+/* Disable pointer hashing if requested */
+bool no_hash_pointers __ro_after_init;
+EXPORT_SYMBOL_GPL(no_hash_pointers);
+
+noinline
+static unsigned long long simple_strntoull(const char *startp, char **endp, unsigned int base, size_t max_chars)
+{
+ const char *cp;
+ unsigned long long result = 0ULL;
+ size_t prefix_chars;
+ unsigned int rv;
+
+ cp = _parse_integer_fixup_radix(startp, &base);
+ prefix_chars = cp - startp;
+ if (prefix_chars < max_chars) {
+ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
+ /* FIXME */
+ cp += (rv & ~KSTRTOX_OVERFLOW);
+ } else {
+ /* Field too short for prefix + digit, skip over without converting */
+ cp = startp + max_chars;
+ }
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoull instead.
+ * This function has caveats. Please use kstrtoull instead.
*/
+noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result;
- unsigned int rv;
-
- cp = _parse_integer_fixup_radix(cp, &base);
- rv = _parse_integer(cp, base, &result);
- /* FIXME */
- cp += (rv & ~KSTRTOX_OVERFLOW);
-
- if (endp)
- *endp = (char *)cp;
-
- return result;
+ return simple_strntoull(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoull);
@@ -81,7 +106,7 @@ EXPORT_SYMBOL(simple_strtoull);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoul instead.
+ * This function has caveats. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
@@ -95,7 +120,7 @@ EXPORT_SYMBOL(simple_strtoul);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtol instead.
+ * This function has caveats. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
@@ -106,20 +131,32 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtol);
+noinline
+static long long simple_strntoll(const char *cp, char **endp, unsigned int base, size_t max_chars)
+{
+ /*
+ * simple_strntoull() safely handles receiving max_chars==0 in the
+ * case cp[0] == '-' && max_chars == 1.
+ * If max_chars == 0 we can drop through and pass it to simple_strntoull()
+ * and the content of *cp is irrelevant.
+ */
+ if (*cp == '-' && max_chars > 0)
+ return -simple_strntoull(cp + 1, endp, base, max_chars - 1);
+
+ return simple_strntoull(cp, endp, base, max_chars);
+}
+
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoll instead.
+ * This function has caveats. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if (*cp == '-')
- return -simple_strtoull(cp + 1, endp, base);
-
- return simple_strtoull(cp, endp, base);
+ return simple_strntoll(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoll);
@@ -378,6 +415,10 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
+static_assert(SIGN == 1);
+static_assert(ZEROPAD == ('0' - ' '));
+static_assert(SMALL == ('a' ^ 'A'));
+
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
FORMAT_TYPE_WIDTH,
@@ -504,7 +545,7 @@ char *number(char *buf, char *end, unsigned long long num,
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = ' ' + (spec.flags & ZEROPAD);
- BUILD_BUG_ON(' ' + ZEROPAD != '0');
+
while (--field_width >= 0) {
if (buf < end)
*buf = c;
@@ -599,7 +640,7 @@ static char *string_nocheck(char *buf, char *end, const char *s,
struct printf_spec spec)
{
int len = 0;
- size_t lim = spec.precision;
+ int lim = spec.precision;
while (lim--) {
char c = *s++;
@@ -613,6 +654,25 @@ static char *string_nocheck(char *buf, char *end, const char *s,
return widen_string(buf, len, end, spec);
}
+static char *err_ptr(char *buf, char *end, void *ptr,
+ struct printf_spec spec)
+{
+ int err = PTR_ERR(ptr);
+ const char *sym = errname(err);
+
+ if (sym)
+ return string_nocheck(buf, end, sym, spec);
+
+ /*
+ * Somebody passed ERR_PTR(-1234) or some other non-existing
+ * Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
+ * printing it as its decimal representation.
+ */
+ spec.flags |= SIGN;
+ spec.base = 10;
+ return number(buf, end, err, spec);
+}
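A hedged usage sketch of the %pe extension this implements (device and clock name are hypothetical):

	struct clk *clk = devm_clk_get(dev, "core");

	if (IS_ERR(clk))
		dev_err(dev, "failed to get core clock: %pe\n", clk);
	/* with CONFIG_SYMBOLIC_ERRNAME=y this prints e.g. "...: -ENOENT" */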
+
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
struct printf_spec spec)
@@ -693,58 +753,70 @@ static int __init debug_boot_weak_hash_enable(char *str)
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;
-static void enable_ptr_key_workfn(struct work_struct *work)
+static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
-}
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+ /* Pairs with smp_rmb() before reading ptr_key. */
+ smp_wmb();
+ WRITE_ONCE(filled_random_ptr_key, true);
+ return NOTIFY_DONE;
+}
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static int __init vsprintf_init_hashval(void)
{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
+ static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
+ execute_with_initialized_rng(&fill_ptr_key_nb);
+ return 0;
}
+subsys_initcall(vsprintf_init_hashval)
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
+/* Maps a pointer to a 32 bit unique identifier. */
+static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
- int key_size = sizeof(ptr_key);
- int ret;
+ unsigned long hashval;
- /* Use hw RNG if available. */
- if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
- static_branch_disable(&not_filled_random_ptr_key);
- return 0;
- }
+ if (!READ_ONCE(filled_random_ptr_key))
+ return -EBUSY;
- ret = add_random_ready_callback(&random_ready);
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
+ /* Pairs with smp_wmb() after writing ptr_key. */
+ smp_rmb();
- return ret;
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+	 * Mask the hash down to 32 bits; this makes it explicit that we have
+	 * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ *hashval_out = hashval;
+ return 0;
+}
+
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ return __ptr_to_hashval(ptr, hashval_out);
}
-early_initcall(initialize_ptr_random);
-/* Maps a pointer to a 32 bit unique identifier. */
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ int ret;
+
+ /*
+ * Print the real pointer value for NULL and error pointers,
+ * as they are not actual addresses.
+ */
+ if (IS_ERR_OR_NULL(ptr))
+ return pointer_string(buf, end, ptr, spec);
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
@@ -752,23 +824,27 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
return pointer_string(buf, end, (const void *)hashval, spec);
}
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ ret = __ptr_to_hashval(ptr, &hashval);
+ if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
+
+static char *default_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
/*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
+ * default is to _not_ leak addresses, so hash before printing,
+ * unless no_hash_pointers is specified on the command line.
*/
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
- return pointer_string(buf, end, (const void *)hashval, spec);
+ if (unlikely(no_hash_pointers))
+ return pointer_string(buf, end, ptr, spec);
+
+ return ptr_to_id(buf, end, ptr, spec);
}
int kptr_restrict __read_mostly;
@@ -780,7 +856,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
switch (kptr_restrict) {
case 0:
/* Handle as %p, hash and do _not_ leak addresses. */
- return ptr_to_id(buf, end, ptr, spec);
+ return default_pointer(buf, end, ptr, spec);
case 1: {
const struct cred *cred;
@@ -788,7 +864,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (in_hardirq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);
@@ -869,6 +945,15 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
return widen_string(buf, n, end, spec);
}
+static noinline_for_stack
+char *file_dentry_name(char *buf, char *end, const struct file *f,
+ struct printf_spec spec, const char *fmt)
+{
+ if (check_pointer(&buf, end, f, spec))
+ return buf;
+
+ return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
+}
#ifdef CONFIG_BLOCK
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
@@ -881,13 +966,13 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev,
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
- if (bdev->bd_part->partno) {
+ if (bdev->bd_partno) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
- buf = number(buf, end, bdev->bd_part->partno, spec);
+ buf = number(buf, end, bdev->bd_partno, spec);
}
return buf;
}
@@ -907,9 +992,13 @@ char *symbol_string(char *buf, char *end, void *ptr,
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
- if (*fmt == 'B')
+ if (*fmt == 'B' && fmt[1] == 'b')
+ sprint_backtrace_build_id(sym, value);
+ else if (*fmt == 'B')
sprint_backtrace(sym, value);
- else if (*fmt != 'f' && *fmt != 's')
+ else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
+ sprint_symbol_build_id(sym, value);
+ else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
@@ -1103,7 +1192,7 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
}
static noinline_for_stack
-char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
+char *bitmap_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
@@ -1147,24 +1236,17 @@ char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
}
static noinline_for_stack
-char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
+char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
- /* current bit is 'cur', most recently seen range is [rbot, rtop] */
- int cur, rbot, rtop;
bool first = true;
+ int rbot, rtop;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
- rbot = cur = find_first_bit(bitmap, nr_bits);
- while (cur < nr_bits) {
- rtop = cur;
- cur = find_next_bit(bitmap, nr_bits, cur + 1);
- if (cur < nr_bits && cur <= rtop + 1)
- continue;
-
+ for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
if (!first) {
if (buf < end)
*buf = ',';
@@ -1173,15 +1255,12 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
first = false;
buf = number(buf, end, rbot, default_dec_spec);
- if (rbot < rtop) {
- if (buf < end)
- *buf = '-';
- buf++;
-
- buf = number(buf, end, rtop, default_dec_spec);
- }
+ if (rtop == rbot + 1)
+ continue;
- rbot = cur;
+ if (buf < end)
+ *buf = '-';
+ buf = number(++buf, end, rtop - 1, default_dec_spec);
}
return buf;
}
@@ -1206,7 +1285,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
case 'R':
reversed = true;
- /* fall through */
+ fallthrough;
default:
separator = ':';
@@ -1622,7 +1701,8 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
switch (*(++fmt)) {
case 'L':
- uc = true; /* fall-through */
+ uc = true;
+ fallthrough;
case 'l':
index = guid_index;
break;
@@ -1674,6 +1754,44 @@ char *netdev_bits(char *buf, char *end, const void *addr,
}
static noinline_for_stack
+char *fourcc_string(char *buf, char *end, const u32 *fourcc,
+ struct printf_spec spec, const char *fmt)
+{
+ char output[sizeof("0123 little-endian (0x01234567)")];
+ char *p = output;
+ unsigned int i;
+ u32 orig, val;
+
+ if (fmt[1] != 'c' || fmt[2] != 'c')
+ return error_string(buf, end, "(%p4?)", spec);
+
+ if (check_pointer(&buf, end, fourcc, spec))
+ return buf;
+
+ orig = get_unaligned(fourcc);
+ val = orig & ~BIT(31);
+
+ for (i = 0; i < sizeof(u32); i++) {
+ unsigned char c = val >> (i * 8);
+
+ /* Print non-control ASCII characters as-is, dot otherwise */
+ *p++ = isascii(c) && isprint(c) ? c : '.';
+ }
+
+ *p++ = ' ';
+ strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
+ p += strlen(p);
+
+ *p++ = ' ';
+ *p++ = '(';
+ p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
+ *p++ = ')';
+ *p = '\0';
+
+ return string(buf, end, output, spec);
+}
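A hedged example of the new %p4cc output, using the DRM XRGB8888 code; the exact string follows the format assembled above:

	u32 fourcc = 0x34325258;	/* 'X' 'R' '2' '4' packed little-endian */

	pr_info("format: %p4cc\n", &fourcc);
	/* -> "format: XR24 little-endian (0x34325258)" */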
+
+static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
@@ -1738,7 +1856,8 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
- bool raw = false;
+ bool raw = false, iso8601_separator = true;
+ bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
@@ -1755,14 +1874,25 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
break;
}
- raw = fmt[count] == 'r';
+ do {
+ switch (fmt[count++]) {
+ case 'r':
+ raw = true;
+ break;
+ case 's':
+ iso8601_separator = false;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
- /* Respect ISO 8601 */
if (buf < end)
- *buf = 'T';
+ *buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
@@ -1772,14 +1902,39 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
}
static noinline_for_stack
+char *time64_str(char *buf, char *end, const time64_t time,
+ struct printf_spec spec, const char *fmt)
+{
+ struct rtc_time rtc_time;
+ struct tm tm;
+
+ time64_to_tm(time, 0, &tm);
+
+ rtc_time.tm_sec = tm.tm_sec;
+ rtc_time.tm_min = tm.tm_min;
+ rtc_time.tm_hour = tm.tm_hour;
+ rtc_time.tm_mday = tm.tm_mday;
+ rtc_time.tm_mon = tm.tm_mon;
+ rtc_time.tm_year = tm.tm_year;
+ rtc_time.tm_wday = tm.tm_wday;
+ rtc_time.tm_yday = tm.tm_yday;
+
+ rtc_time.tm_isdst = 0;
+
+ return rtc_str(buf, end, &rtc_time, spec, fmt);
+}
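A short usage sketch of the %ptT handling added here (the timestamp source is hypothetical):

	time64_t t = ktime_get_real_seconds();

	pr_info("created %ptT\n", &t);	/* e.g. "created 2023-03-01T12:34:56" */
	pr_info("created %ptTs\n", &t);	/* 's' swaps the ISO 8601 'T' for a space */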
+
+static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
const char *fmt)
{
switch (fmt[1]) {
case 'R':
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
+ case 'T':
+ return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
default:
- return error_string(buf, end, "(%ptR?)", spec);
+ return error_string(buf, end, "(%pt?)", spec);
}
}
@@ -1799,7 +1954,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return error_string(buf, end, "(%pC?)", spec);
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -1831,6 +1986,93 @@ char *format_flags(char *buf, char *end, unsigned long flags,
return buf;
}
+struct page_flags_fields {
+ int width;
+ int shift;
+ int mask;
+ const struct printf_spec *spec;
+ const char *name;
+};
+
+static const struct page_flags_fields pff[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ &default_dec_spec, "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ &default_dec_spec, "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ &default_dec_spec, "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ &default_flag_spec, "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ &default_flag_spec, "kasantag"},
+};
+
+static
+char *format_page_flags(char *buf, char *end, unsigned long flags)
+{
+ unsigned long main_flags = flags & PAGEFLAGS_MASK;
+ bool append = false;
+ int i;
+
+ buf = number(buf, end, flags, default_flag_spec);
+ if (buf < end)
+ *buf = '(';
+ buf++;
+
+ /* Page flags from the main area. */
+ if (main_flags) {
+ buf = format_flags(buf, end, main_flags, pageflag_names);
+ append = true;
+ }
+
+ /* Page flags from the fields area */
+ for (i = 0; i < ARRAY_SIZE(pff); i++) {
+ /* Skip undefined fields. */
+ if (!pff[i].width)
+ continue;
+
+ /* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
+ if (append) {
+ if (buf < end)
+ *buf = '|';
+ buf++;
+ }
+
+ buf = string(buf, end, pff[i].name, default_str_spec);
+ if (buf < end)
+ *buf = '=';
+ buf++;
+ buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
+ *pff[i].spec);
+
+ append = true;
+ }
+ if (buf < end)
+ *buf = ')';
+ buf++;
+
+ return buf;
+}
+
+static
+char *format_page_type(char *buf, char *end, unsigned int page_type)
+{
+ buf = number(buf, end, page_type, default_flag_spec);
+
+ if (buf < end)
+ *buf = '(';
+ buf++;
+
+ if (page_type_has_type(page_type))
+ buf = format_flags(buf, end, ~page_type, pagetype_names);
+
+ if (buf < end)
+ *buf = ')';
+ buf++;
+
+ return buf;
+}
+
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
@@ -1843,17 +2085,15 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
switch (fmt[1]) {
case 'p':
- flags = *(unsigned long *)flags_ptr;
- /* Remove zone id */
- flags &= (1UL << NR_PAGEFLAGS) - 1;
- names = pageflag_names;
- break;
+ return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
+ case 't':
+ return format_page_type(buf, end, *(unsigned int *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
break;
case 'g':
- flags = *(gfp_t *)flags_ptr;
+ flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
names = gfpflag_names;
break;
default:
@@ -1863,32 +2103,30 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
return format_flags(buf, end, flags, names);
}
-static const char *device_node_name_for_depth(const struct device_node *np, int depth)
-{
- for ( ; np && depth; depth--)
- np = np->parent;
-
- return kbasename(np->full_name);
-}
-
static noinline_for_stack
-char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+ char *end)
{
int depth;
- const struct device_node *parent = np->parent;
- /* special case for root node */
- if (!parent)
- return string_nocheck(buf, end, "/", default_str_spec);
-
- for (depth = 0; parent->parent; depth++)
- parent = parent->parent;
+	/* Walk from the root node down to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+ /*
+ * Only get a reference for other nodes (i.e. parent nodes).
+ * fwnode refcount may be 0 here.
+ */
+ struct fwnode_handle *__fwnode = depth ?
+ fwnode_get_nth_parent(fwnode, depth) : fwnode;
- for ( ; depth >= 0; depth--) {
- buf = string_nocheck(buf, end, "/", default_str_spec);
- buf = string(buf, end, device_node_name_for_depth(np, depth),
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
+
+ if (depth)
+ fwnode_handle_put(__fwnode);
}
+
return buf;
}
@@ -1902,16 +2140,13 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
char *buf_start = buf;
struct property *prop;
bool has_mult, pass;
- static const struct printf_spec num_spec = {
- .flags = SMALL,
- .field_width = -1,
- .precision = -1,
- .base = 10,
- };
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
+ if (fmt[0] != 'F')
+ return error_string(buf, end, "(%pO?)", spec);
+
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
@@ -1933,20 +2168,21 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
switch (*fmt) {
case 'f': /* full_name */
- buf = device_node_gen_full_name(dn, buf, end);
+ buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
+ end);
break;
case 'n': /* name */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
str_spec.precision = precision;
break;
case 'p': /* phandle */
- buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+ buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
break;
case 'P': /* path-spec */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
@@ -1984,17 +2220,64 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static char *kobject_string(char *buf, char *end, void *ptr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
+ struct printf_spec spec, const char *fmt)
{
- switch (fmt[1]) {
- case 'F':
- return device_node_string(buf, end, ptr, spec, fmt + 1);
+ struct printf_spec str_spec = spec;
+ char *buf_start = buf;
+
+ str_spec.field_width = -1;
+
+ if (*fmt != 'w')
+ return error_string(buf, end, "(%pf?)", spec);
+
+ if (check_pointer(&buf, end, fwnode, spec))
+ return buf;
+
+ fmt++;
+
+ switch (*fmt) {
+ case 'P': /* name */
+ buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
+ break;
+ case 'f': /* full_name */
+ default:
+ buf = fwnode_full_name_string(fwnode, buf, end);
+ break;
}
- return error_string(buf, end, "(%pO?)", spec);
+ return widen_string(buf, buf - buf_start, end, spec);
}
+int __init no_hash_pointers_enable(char *str)
+{
+ if (no_hash_pointers)
+ return 0;
+
+ no_hash_pointers = true;
+
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** This system shows unhashed kernel memory addresses **\n");
+ pr_warn("** via the console, logs, and other interfaces. This **\n");
+ pr_warn("** might reduce the security of your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging **\n");
+ pr_warn("** the kernel, report this immediately to your system **\n");
+ pr_warn("** administrator! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+
+ return 0;
+}
+early_param("no_hash_pointers", no_hash_pointers_enable);
+
+/* Used for Rust formatting ('%pA'). */
+char *rust_fmt_argument(char *buf, char *end, void *ptr);
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -2007,10 +2290,12 @@ static char *kobject_string(char *buf, char *end, void *ptr,
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
- * - 'F' Same as 'S'
- * - 'f' Same as 's'
- * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
+ * - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - 'S[R]b' as above with module build ID (for use in backtraces)
+ * - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
+ * %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
+ * - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
@@ -2037,7 +2322,7 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
- * http://tools.ietf.org/html/rfc5952
+ * https://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by combination
* of the following flags (see string_escape_mem() for the
* details):
@@ -2065,8 +2350,11 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* Implements a "recursive vsnprintf".
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
- * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users.
+ * Use only for procfs, sysfs and similar files, not printk(); please
+ * read the documentation (path below) first.
* - 'NF' For a netdev_features_t
+ * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
@@ -2079,8 +2367,9 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
- * - 't[R][dt][r]' For time and date as represented:
+ * - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
+ * T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
@@ -2099,25 +2388,36 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* F device node flags
* c major compatible string
* C full compatible string
- * - 'x' For printing the address. Equivalent to "%lx".
+ * - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
+ * Without an option prints the full name of the node
+ * f full name
+ * P node name, including a possible unit address
+ * - 'x' For printing the address unmodified. Equivalent to "%lx".
+ * Please read the documentation (path below) before using!
+ * - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
+ * bpf_trace_printk() where [ku] prefix specifies either kernel (k)
+ * or user (u) memory to probe, and:
+ * s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
+ *
+ * There is also a '%pA' format specifier, but it is only intended to be used
+ * from Rust code to format core::fmt::Arguments. Do *not* use it from C.
+ * See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
- case 'F':
- case 'f':
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
- /* Fallthrough */
+ fallthrough;
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
@@ -2157,6 +2457,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, spec, fmt);
+ case '4':
+ return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
@@ -2166,9 +2468,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
- return dentry_name(buf, end,
- ((const struct file *)ptr)->f_path.dentry,
- spec, fmt);
+ return file_dentry_name(buf, end, ptr, spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
@@ -2177,13 +2477,33 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
- return kobject_string(buf, end, ptr, spec, fmt);
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ case 'f':
+ return fwnode_string(buf, end, ptr, spec, fmt + 1);
+ case 'A':
+ if (!IS_ENABLED(CONFIG_RUST)) {
+ WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
+ return error_string(buf, end, "(%pA?)", spec);
+ }
+ return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
+ case 'e':
+ /* %pe with a non-ERR_PTR gets treated as plain %p */
+ if (!IS_ERR(ptr))
+ return default_pointer(buf, end, ptr, spec);
+ return err_ptr(buf, end, ptr, spec);
+ case 'u':
+ case 'k':
+ switch (fmt[1]) {
+ case 's':
+ return string(buf, end, ptr, spec);
+ default:
+ return error_string(buf, end, "(einval)", spec);
+ }
+ default:
+ return default_pointer(buf, end, ptr, spec);
}
-
- /* default is to _not_ leak addresses, hash before printing */
- return ptr_to_id(buf, end, ptr, spec);
}
/*
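As a rough illustration of the specifiers documented above (not part of the patch; the variables and messages are invented), a caller might emit the new conversions like this:

	u32 fourcc = 0x34325258;			/* DRM_FORMAT_XRGB8888, "XR24" */
	time64_t stamp = ktime_get_real_seconds();
	void *ret = ERR_PTR(-ENOMEM);

	pr_info("pixel format: %p4cc\n", &fourcc);	/* "XR24 little-endian (0x34325258)" */
	pr_info("timestamp: %ptT\n", &stamp);		/* time64_t rendered as date and time */
	if (IS_ERR(ret))
		pr_err("request failed: %pe\n", ret);	/* "-ENOMEM" when the errname is known */
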
@@ -2335,7 +2655,7 @@ qualifier:
case 'x':
spec->flags |= SMALL;
- /* fall through */
+ fallthrough;
case 'X':
spec->base = 16;
@@ -2344,6 +2664,7 @@ qualifier:
case 'd':
case 'i':
spec->flags |= SIGN;
+ break;
case 'u':
break;
@@ -2353,7 +2674,7 @@ qualifier:
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
- /* Fall-through */
+ fallthrough;
default:
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
@@ -2603,13 +2924,15 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
+ if (unlikely(!size))
+ return 0;
+
i = vsnprintf(buf, size, fmt, args);
if (likely(i < size))
return i;
- if (size != 0)
- return size - 1;
- return 0;
+
+ return size - 1;
}
EXPORT_SYMBOL(vscnprintf);
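A minimal sketch (invented caller code) of the semantics this cleanup preserves: vscnprintf()/scnprintf() return the number of characters actually written, and the new early return handles a zero-sized buffer without touching it:

	char buf[8];
	int n;

	n = scnprintf(buf, sizeof(buf), "%s", "truncated output");
	/* n == 7: bytes stored in buf, excluding the trailing NUL */
	n = scnprintf(buf, 0, "ignored");
	/* n == 0: nothing is written; the size check above returns immediately */
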
@@ -2812,10 +3135,9 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
/* Dereference of functions is still OK */
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
+ case 'e':
save_arg(void *);
break;
default:
@@ -2988,10 +3310,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
switch (*fmt) {
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
+ case 'e':
process = true;
break;
default:
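A hedged sketch (assumed caller-side code, not in this patch) of the bprintf()/bstr_printf() pair these two switch statements serve, now also covering %pe:

	u32 bin[64];
	char out[128];

	/* Pack the arguments; for %pe the pointer itself is saved in bin[]. */
	bprintf(bin, ARRAY_SIZE(bin), "probe failed: %pe\n", ERR_PTR(-EPROBE_DEFER));
	/* Format later, e.g. when the trace buffer is read back. */
	bstr_printf(out, sizeof(out), "probe failed: %pe\n", bin);
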
@@ -3131,7 +3452,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
while (*fmt) {
/* skip any white space in format */
- /* white space in format matchs any amount of
+ /* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
@@ -3264,7 +3585,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
++fmt;
for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
- set_bit((u8)*fmt, set);
+ __set_bit((u8)*fmt, set);
/* no ']' or no character set found */
if (!*fmt || !len)
@@ -3274,7 +3595,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
if (negate) {
bitmap_complement(set, set, 256);
/* exclude null '\0' byte */
- clear_bit(0, set);
+ __clear_bit(0, set);
}
/* match must be non-empty */
@@ -3296,10 +3617,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
case 'i':
base = 0;
- /* fall through */
+ fallthrough;
case 'd':
is_sign = true;
- /* fall through */
+ fallthrough;
case 'u':
break;
case '%':
@@ -3318,36 +3639,26 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
str = skip_spaces(str);
digit = *str;
- if (is_sign && digit == '-')
+ if (is_sign && digit == '-') {
+ if (field_width == 1)
+ break;
+
digit = *(str + 1);
+ }
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
- || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 8 && !isodigit(digit))
|| (base == 0 && !isdigit(digit)))
break;
if (is_sign)
- val.s = qualifier != 'L' ?
- simple_strtol(str, &next, base) :
- simple_strtoll(str, &next, base);
+ val.s = simple_strntoll(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
else
- val.u = qualifier != 'L' ?
- simple_strtoul(str, &next, base) :
- simple_strtoull(str, &next, base);
-
- if (field_width > 0 && next - str > field_width) {
- if (base == 0)
- _parse_integer_fixup_radix(str, &base);
- while (next - str > field_width) {
- if (is_sign)
- val.s = div_s64(val.s, base);
- else
- val.u = div_u64(val.u, base);
- --next;
- }
- }
+ val.u = simple_strntoull(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
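To make the switch to simple_strntoll()/simple_strntoull() concrete, an illustrative (invented) use: the field width now bounds how many characters the conversion consumes, instead of dividing the parsed value back down afterwards:

	int a, b;

	if (sscanf("12345678", "%4d%4d", &a, &b) == 2) {
		/* a == 1234, b == 5678: each conversion stops after four digits */
	}
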
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
index 6bdc1cd15f76..ec10506834b6 100644
--- a/lib/win_minmax.c
+++ b/lib/win_minmax.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* lib/minmax.c: windowed min/max tracker
*
* Kathleen Nichols' algorithm for tracking the minimum (or maximum)
diff --git a/lib/xarray.c b/lib/xarray.c
index 446b956c9188..39f07bfc4dcc 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -11,6 +12,8 @@
#include <linux/slab.h>
#include <linux/xarray.h>
+#include "radix-tree.h"
+
/*
* Coding conventions in this file:
*
@@ -156,7 +159,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
xas->xa_index += offset << shift;
}
-static void xas_advance(struct xa_state *xas)
+static void xas_next_offset(struct xa_state *xas)
{
xas->xa_offset++;
xas_move_index(xas, xas->xa_offset);
@@ -203,9 +206,11 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
void *entry = xa_entry(xas->xa, node, offset);
xas->xa_node = node;
- if (xa_is_sibling(entry)) {
+ while (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+ entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;
@@ -244,10 +249,6 @@ void *xas_load(struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_load);
-/* Move the radix tree node cache here */
-extern struct kmem_cache *radix_tree_node_cachep;
-extern void radix_tree_node_rcu_free(struct rcu_head *head);
-
#define XA_RCU_FREE ((struct xarray *)1)
static void xa_node_free(struct xa_node *node)
@@ -261,17 +262,19 @@ static void xa_node_free(struct xa_node *node)
* xas_destroy() - Free any resources allocated during the XArray operation.
* @xas: XArray operation state.
*
- * This function is now internal-only.
+ * Most users will not need to call this function; it is called for you
+ * by xas_nomem().
*/
-static void xas_destroy(struct xa_state *xas)
+void xas_destroy(struct xa_state *xas)
{
- struct xa_node *node = xas->xa_alloc;
+ struct xa_node *next, *node = xas->xa_alloc;
- if (!node)
- return;
- XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
- xas->xa_alloc = NULL;
+ while (node) {
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ next = rcu_dereference_raw(node->parent);
+ radix_tree_node_rcu_free(&node->rcu_head);
+ xas->xa_alloc = node = next;
+ }
}
/**
@@ -300,9 +303,10 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -331,13 +335,14 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
gfp |= __GFP_ACCOUNT;
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
xas_lock_type(xas, lock_type);
} else {
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
}
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -367,7 +372,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node) {
xas_set_err(xas, -ENOMEM);
return NULL;
@@ -402,7 +407,7 @@ static unsigned long xas_size(const struct xa_state *xas)
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
- * multiple-index entry at index 0, the calculation is a little more complex
+ * multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
@@ -702,7 +707,7 @@ void xas_create_range(struct xa_state *xas)
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
- xas->xa_index |= ((sibs + 1) << shift) - 1;
+ xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
@@ -718,6 +723,8 @@ void xas_create_range(struct xa_state *xas)
for (;;) {
struct xa_node *node = xas->xa_node;
+ if (node->shift >= shift)
+ break;
xas->xa_node = xa_parent_locked(xas->xa, node);
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
@@ -945,6 +952,156 @@ void xas_init_marks(const struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_init_marks);
+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+ unsigned int marks = 0;
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (node_get_mark(node, offset, mark))
+ marks |= 1 << (__force unsigned int)mark;
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+ struct xa_node *child, unsigned int marks)
+{
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (marks & (1 << (__force unsigned int)mark)) {
+ node_set_mark(node, offset, mark);
+ if (child)
+ node_mark_all(child, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: Current entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ gfp_t gfp)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int mask = xas->xa_sibs;
+
+ /* XXX: no support for splitting really large entries yet */
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ goto nomem;
+ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+ return;
+
+ do {
+ unsigned int i;
+ void *sibling = NULL;
+ struct xa_node *node;
+
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ if (!node)
+ goto nomem;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(i);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+ xas->xa_alloc = node;
+ } while (sibs-- > 0);
+
+ return;
+nomem:
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+ do {
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ xas_update(xas, child);
+ } else {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ }
+ } while (offset-- > xas->xa_offset);
+
+ node->nr_values += values;
+ xas_update(xas, node);
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
+
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
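A hedged sketch of the calling sequence the two new helpers above expect (caller-side code, assumed; the real users are in the page cache): splitting an existing order-9 entry at @index into order-0 entries:

	XA_STATE_ORDER(xas, &array, index, 0);		/* 0 == new, smaller order */
	void *entry = xa_load(&array, index);

	xas_split_alloc(&xas, entry, 9, GFP_KERNEL);	/* may sleep, may set -ENOMEM */
	xas_lock(&xas);
	if (!xas_error(&xas))
		xas_split(&xas, entry, 9);		/* 9 == current order of the entry */
	xas_unlock(&xas);
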
@@ -967,17 +1124,19 @@ void xas_pause(struct xa_state *xas)
if (xas_invalid(xas))
return;
+ xas->xa_node = XAS_RESTART;
if (node) {
- unsigned int offset = xas->xa_offset;
+ unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ if (xas->xa_index == 0)
+ xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
- xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
@@ -994,6 +1153,8 @@ void *__xas_prev(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index--;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
@@ -1031,6 +1192,8 @@ void *__xas_next(struct xa_state *xas)
if (!xas_frozen(xas->xa_node))
xas->xa_index++;
+ if (!xas->xa_node)
+ return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
@@ -1075,13 +1238,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
{
void *entry;
- if (xas_error(xas))
+ if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
+ if (xas->xa_index > max)
+ return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
- } else if (xas_top(xas->xa_node)) {
+ } else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
@@ -1090,7 +1255,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
}
- xas_advance(xas);
+ xas_next_offset(xas);
while (xas->xa_node && (xas->xa_index <= max)) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@@ -1108,7 +1273,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
if (entry && !xa_is_sibling(entry))
return entry;
- xas_advance(xas);
+ xas_next_offset(xas);
}
if (!xas->xa_node)
@@ -1146,6 +1311,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
if (xas_error(xas))
return NULL;
+ if (xas->xa_index > max)
+ goto max;
if (!xas->xa_node) {
xas->xa_index = 1;
@@ -1197,6 +1364,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1394,7 +1563,7 @@ EXPORT_SYMBOL(__xa_store);
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
@@ -1536,7 +1705,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
*
* After this function returns, loads from any index between @first and @last,
* inclusive will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
@@ -1579,6 +1748,46 @@ unlock:
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ int order = 0;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+
+ if (!entry)
+ goto unlock;
+
+ if (!xas.xa_node)
+ goto unlock;
+
+ for (;;) {
+ unsigned int slot = xas.xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+ if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ break;
+ order++;
+ }
+
+ order += xas.xa_node->shift;
+unlock:
+ rcu_read_unlock();
+
+ return order;
+}
+EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
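A brief, illustrative use of the new query (not from the patch):

	int order = xa_get_order(&array, index);	/* 0 for an ordinary entry */

	pr_debug("entry at %lu spans %lu indices\n", index, 1UL << order);
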
/**
@@ -1593,6 +1802,9 @@ EXPORT_SYMBOL(xa_store_range);
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -1641,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1820,6 +2035,18 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
}
EXPORT_SYMBOL(xa_find);
+static bool xas_sibling(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned long mask;
+
+ if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
+}
+
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
@@ -1843,21 +2070,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
XA_STATE(xas, xa, *indexp + 1);
void *entry;
+ if (xas.xa_index == 0)
+ return NULL;
+
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
- if (xas.xa_node == XAS_BOUNDS)
+
+ if (xas_invalid(&xas))
break;
- if (xas.xa_shift) {
- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
- continue;
- } else {
- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
- continue;
- }
+ if (xas_sibling(&xas))
+ continue;
if (!xas_retry(&xas, entry))
break;
}
@@ -1950,6 +2176,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
EXPORT_SYMBOL(xa_extract);
/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+ struct xa_state xas = {
+ .xa = node->array,
+ .xa_index = (unsigned long)node->offset <<
+ (node->shift + XA_CHUNK_SHIFT),
+ .xa_shift = node->shift + XA_CHUNK_SHIFT,
+ .xa_offset = node->offset,
+ .xa_node = xa_parent_locked(node->array, node),
+ .xa_update = update,
+ };
+
+ xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
+
+/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*
diff --git a/lib/xxhash.c b/lib/xxhash.c
index aa61e2a3802f..d5bb9ff10607 100644
--- a/lib/xxhash.c
+++ b/lib/xxhash.c
@@ -34,7 +34,7 @@
* ("BSD").
*
* You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 22528743d4ce..aef086a6bf2f 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -5,7 +5,7 @@ config XZ_DEC
help
LZMA2 compression algorithm and BCJ filters are supported using
the .xz file format as the container. For integrity checking,
- CRC32 is supported. See Documentation/xz.txt for more information.
+ CRC32 is supported. See Documentation/staging/xz.rst for more information.
if XZ_DEC
@@ -19,11 +19,6 @@ config XZ_DEC_POWERPC
default y
select XZ_DEC_BCJ
-config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder" if EXPERT
- default y
- select XZ_DEC_BCJ
-
config XZ_DEC_ARM
bool "ARM BCJ filter decoder" if EXPERT
default y
@@ -39,6 +34,19 @@ config XZ_DEC_SPARC
default y
select XZ_DEC_BCJ
+config XZ_DEC_MICROLZMA
+ bool "MicroLZMA decoder"
+ default n
+ help
+ MicroLZMA is a header format variant where the first byte
+ of a raw LZMA stream (without the end of stream marker) has
+ been replaced with a bitwise-negation of the lc/lp/pb
+ properties byte. MicroLZMA was created to be used in EROFS
+	  but can be used elsewhere too when keeping the header overhead
+	  to a minimum is important.
+
+ Unless you know that you need this, say N.
+
endif
config XZ_DEC_BCJ
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 912aae5fa09e..88a2c35e1b59 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -2,7 +2,7 @@
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index a768e6d28bbb..ef449e97d1a1 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -2,7 +2,7 @@
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
@@ -422,7 +422,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
/*
* Flush pending already filtered data to the output buffer. Return
- * immediatelly if we couldn't flush everything, or if the next
+ * immediately if we couldn't flush everything, or if the next
* filter in the chain had already returned XZ_STREAM_END.
*/
if (s->temp.filtered > 0) {
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 08c3c8049998..27ce34520e78 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -2,7 +2,7 @@
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
@@ -147,8 +147,8 @@ struct lzma_dec {
/*
* LZMA properties or related bit masks (number of literal
- * context bits, a mask dervied from the number of literal
- * position bits, and a mask dervied from the number
+ * context bits, a mask derived from the number of literal
+ * position bits, and a mask derived from the number
* position bits)
*/
uint32_t lc;
@@ -248,6 +248,10 @@ struct lzma2_dec {
* before the first LZMA chunk.
*/
bool need_props;
+
+#ifdef XZ_DEC_MICROLZMA
+ bool pedantic_microlzma;
+#endif
};
struct xz_dec_lzma2 {
@@ -387,7 +391,14 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
*left -= copy_size;
- memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+ /*
+ * If doing in-place decompression in single-call mode and the
+ * uncompressed size of the file is larger than the caller
+ * thought (i.e. it is invalid input!), the buffers below may
+ * overlap and cause undefined behavior with memcpy().
+ * With valid inputs memcpy() would be fine here.
+ */
+ memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
dict->pos += copy_size;
if (dict->full < dict->pos)
@@ -397,7 +408,11 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
if (dict->pos == dict->end)
dict->pos = 0;
- memcpy(b->out + b->out_pos, b->in + b->in_pos,
+ /*
+ * Like above but for multi-call mode: use memmove()
+ * to avoid undefined behavior with invalid input.
+ */
+ memmove(b->out + b->out_pos, b->in + b->in_pos,
copy_size);
}
@@ -408,6 +423,12 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
}
}
+#ifdef XZ_DEC_MICROLZMA
+# define DICT_FLUSH_SUPPORTS_SKIPPING true
+#else
+# define DICT_FLUSH_SUPPORTS_SKIPPING false
+#endif
+
/*
* Flush pending data from dictionary to b->out. It is assumed that there is
* enough space in b->out. This is guaranteed because caller uses dict_limit()
@@ -421,8 +442,19 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
if (dict->pos == dict->end)
dict->pos = 0;
- memcpy(b->out + b->out_pos, dict->buf + dict->start,
- copy_size);
+ /*
+ * These buffers cannot overlap even if doing in-place
+ * decompression because in multi-call mode dict->buf
+ * has been allocated by us in this file; it's not
+ * provided by the caller like in single-call mode.
+ *
+ * With MicroLZMA, b->out can be NULL to skip bytes that
+ * the caller doesn't need. This cannot be done with XZ
+ * because it would break BCJ filters.
+ */
+ if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL)
+ memcpy(b->out + b->out_pos, dict->buf + dict->start,
+ copy_size);
}
dict->start = dict->pos;
@@ -484,11 +516,11 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
}
/*
- * Decode one bit. In some versions, this function has been splitted in three
+ * Decode one bit. In some versions, this function has been split in three
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
- * on x86). Using a non-splitted version results in nicer looking code too.
+ * on x86). Using a non-split version results in nicer looking code too.
*
* NOTE: This must return an int. Do not make it return a bool or the speed
* of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
@@ -761,7 +793,7 @@ static bool lzma_main(struct xz_dec_lzma2 *s)
}
/*
- * Reset the LZMA decoder and range decoder state. Dictionary is nore reset
+ * Reset the LZMA decoder and range decoder state. Dictionary is not reset
* here, because LZMA state may be reset without resetting the dictionary.
*/
static void lzma_reset(struct xz_dec_lzma2 *s)
@@ -774,6 +806,7 @@ static void lzma_reset(struct xz_dec_lzma2 *s)
s->lzma.rep1 = 0;
s->lzma.rep2 = 0;
s->lzma.rep3 = 0;
+ s->lzma.len = 0;
/*
* All probabilities are initialized to the same value. This hack
@@ -1043,7 +1076,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.sequence = SEQ_LZMA_PREPARE;
- /* Fall through */
+ fallthrough;
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
@@ -1055,7 +1088,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
- /* Fall through */
+ fallthrough;
case SEQ_LZMA_RUN:
/*
@@ -1146,6 +1179,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
if (DEC_IS_DYNALLOC(s->dict.mode)) {
if (s->dict.allocated < s->dict.size) {
+ s->dict.allocated = s->dict.size;
vfree(s->dict.buf);
s->dict.buf = vmalloc(s->dict.size);
if (s->dict.buf == NULL) {
@@ -1156,8 +1190,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
}
}
- s->lzma.len = 0;
-
s->lzma2.sequence = SEQ_CONTROL;
s->lzma2.need_dict_reset = true;
@@ -1173,3 +1205,140 @@ XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
kfree(s);
}
+
+#ifdef XZ_DEC_MICROLZMA
+/* This is a wrapper struct to have a nice struct name in the public API. */
+struct xz_dec_microlzma {
+ struct xz_dec_lzma2 s;
+};
+
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr,
+ struct xz_buf *b)
+{
+ struct xz_dec_lzma2 *s = &s_ptr->s;
+
+ /*
+ * sequence is SEQ_PROPERTIES before the first input byte,
+ * SEQ_LZMA_PREPARE until a total of five bytes have been read,
+ * and SEQ_LZMA_RUN for the rest of the input stream.
+ */
+ if (s->lzma2.sequence != SEQ_LZMA_RUN) {
+ if (s->lzma2.sequence == SEQ_PROPERTIES) {
+ /* One byte is needed for the props. */
+ if (b->in_pos >= b->in_size)
+ return XZ_OK;
+
+ /*
+ * Don't increment b->in_pos here. The same byte is
+ * also passed to rc_read_init() which will ignore it.
+ */
+ if (!lzma_props(s, ~b->in[b->in_pos]))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ }
+
+ /*
+ * xz_dec_microlzma_reset() doesn't validate the compressed
+ * size so we do it here. We have to limit the maximum size
+ * to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice
+ * round number and much more than users of this code should
+ * ever need.
+ */
+ if (s->lzma2.compressed < RC_INIT_BYTES
+ || s->lzma2.compressed > (3U << 30))
+ return XZ_DATA_ERROR;
+
+ if (!rc_read_init(&s->rc, b))
+ return XZ_OK;
+
+ s->lzma2.compressed -= RC_INIT_BYTES;
+ s->lzma2.sequence = SEQ_LZMA_RUN;
+
+ dict_reset(&s->dict, b);
+ }
+
+ /* This is to allow increasing b->out_size between calls. */
+ if (DEC_IS_SINGLE(s->dict.mode))
+ s->dict.end = b->out_size - b->out_pos;
+
+ while (true) {
+ dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos,
+ s->lzma2.uncompressed));
+
+ if (!lzma2_lzma(s, b))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+ if (s->lzma2.uncompressed == 0) {
+ if (s->lzma2.pedantic_microlzma) {
+ if (s->lzma2.compressed > 0 || s->lzma.len > 0
+ || !rc_is_finished(&s->rc))
+ return XZ_DATA_ERROR;
+ }
+
+ return XZ_STREAM_END;
+ }
+
+ if (b->out_pos == b->out_size)
+ return XZ_OK;
+
+ if (b->in_pos == b->in_size
+ && s->temp.size < s->lzma2.compressed)
+ return XZ_OK;
+ }
+}
+
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size)
+{
+ struct xz_dec_microlzma *s;
+
+ /* Restrict dict_size to the same range as in the LZMA2 code. */
+ if (dict_size < 4096 || dict_size > (3U << 30))
+ return NULL;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->s.dict.mode = mode;
+ s->s.dict.size = dict_size;
+
+ if (DEC_IS_MULTI(mode)) {
+ s->s.dict.end = dict_size;
+
+ s->s.dict.buf = vmalloc(dict_size);
+ if (s->s.dict.buf == NULL) {
+ kfree(s);
+ return NULL;
+ }
+ }
+
+ return s;
+}
+
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact)
+{
+ /*
+ * comp_size is validated in xz_dec_microlzma_run().
+ * uncomp_size can safely be anything.
+ */
+ s->s.lzma2.compressed = comp_size;
+ s->s.lzma2.uncompressed = uncomp_size;
+ s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact;
+
+ s->s.lzma2.sequence = SEQ_PROPERTIES;
+ s->s.temp.size = 0;
+}
+
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s)
+{
+ if (DEC_IS_MULTI(s->s.dict.mode))
+ vfree(s->s.dict.buf);
+
+ kfree(s);
+}
+#endif
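For orientation, a hedged sketch of how a caller might drive the new MicroLZMA interface in single-call mode (buffer names are assumptions, error handling trimmed):

	struct xz_dec_microlzma *s;
	struct xz_buf b = {
		.in = comp_buf,  .in_pos = 0, .in_size = comp_size,
		.out = out_buf,  .out_pos = 0, .out_size = out_size,
	};
	enum xz_ret ret;

	s = xz_dec_microlzma_alloc(XZ_SINGLE, 1U << 18);	/* 256 KiB dictionary */
	if (!s)
		return -ENOMEM;
	xz_dec_microlzma_reset(s, comp_size, out_size, true);	/* sizes known exactly */
	ret = xz_dec_microlzma_run(s, &b);
	xz_dec_microlzma_end(s);
	return ret == XZ_STREAM_END ? 0 : -EIO;
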
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index bd1d182419d7..683570b93a8c 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -402,12 +402,12 @@ static enum xz_ret dec_stream_header(struct xz_dec *s)
* we will accept other check types too, but then the check won't
* be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
*/
+ if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX)
+ return XZ_OPTIONS_ERROR;
+
s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
#ifdef XZ_DEC_ANY_CHECK
- if (s->check_type > XZ_CHECK_MAX)
- return XZ_OPTIONS_ERROR;
-
if (s->check_type > XZ_CHECK_CRC32)
return XZ_UNSUPPORTED_CHECK;
#else
@@ -583,7 +583,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
if (ret != XZ_OK)
return ret;
- /* Fall through */
+ fallthrough;
case SEQ_BLOCK_START:
/* We need one byte of input to continue. */
@@ -608,7 +608,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.pos = 0;
s->sequence = SEQ_BLOCK_HEADER;
- /* Fall through */
+ fallthrough;
case SEQ_BLOCK_HEADER:
if (!fill_temp(s, b))
@@ -620,7 +620,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_UNCOMPRESS;
- /* Fall through */
+ fallthrough;
case SEQ_BLOCK_UNCOMPRESS:
ret = dec_block(s, b);
@@ -629,7 +629,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_PADDING;
- /* Fall through */
+ fallthrough;
case SEQ_BLOCK_PADDING:
/*
@@ -651,7 +651,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_CHECK;
- /* Fall through */
+ fallthrough;
case SEQ_BLOCK_CHECK:
if (s->check_type == XZ_CHECK_CRC32) {
@@ -675,7 +675,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_PADDING;
- /* Fall through */
+ fallthrough;
case SEQ_INDEX_PADDING:
while ((s->index.size + (b->in_pos - s->in_start))
@@ -699,7 +699,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_CRC32;
- /* Fall through */
+ fallthrough;
case SEQ_INDEX_CRC32:
ret = crc32_validate(s, b);
@@ -709,7 +709,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.size = STREAM_HEADER_SIZE;
s->sequence = SEQ_STREAM_FOOTER;
- /* Fall through */
+ fallthrough;
case SEQ_STREAM_FOOTER:
if (!fill_temp(s, b))
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
index 32eb3c03aede..61098c67a413 100644
--- a/lib/xz/xz_dec_syms.c
+++ b/lib/xz/xz_dec_syms.c
@@ -15,8 +15,15 @@ EXPORT_SYMBOL(xz_dec_reset);
EXPORT_SYMBOL(xz_dec_run);
EXPORT_SYMBOL(xz_dec_end);
+#ifdef CONFIG_XZ_DEC_MICROLZMA
+EXPORT_SYMBOL(xz_dec_microlzma_alloc);
+EXPORT_SYMBOL(xz_dec_microlzma_reset);
+EXPORT_SYMBOL(xz_dec_microlzma_run);
+EXPORT_SYMBOL(xz_dec_microlzma_end);
+#endif
+
MODULE_DESCRIPTION("XZ decompressor");
-MODULE_VERSION("1.0");
+MODULE_VERSION("1.1");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
/*
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 071d67bee9f5..92d852d4f87a 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -2,7 +2,7 @@
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 09360ebb510e..bf1e94ec7873 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -37,6 +37,9 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
+# ifdef CONFIG_XZ_DEC_MICROLZMA
+# define XZ_DEC_MICROLZMA
+# endif
# define memeq(a, b, size) (memcmp(a, b, size) == 0)
# define memzero(buf, size) memset(buf, 0, size)
# endif
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 66cb5a7055ec..430bb3a0d195 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -19,7 +19,7 @@
/*
* See the .xz file format specification at
- * http://tukaani.org/xz/xz-file-format.txt
+ * https://tukaani.org/xz/xz-file-format.txt
* to understand the container format.
*/
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index d20ef458f137..3a1d8d34182e 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -52,16 +52,19 @@
#include <linux/zutil.h>
#include "defutil.h"
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+# include "../zlib_dfltcc/dfltcc_deflate.h"
+#else
+#define DEFLATE_RESET_HOOK(strm) do {} while (0)
+#define DEFLATE_HOOK(strm, flush, bstate) 0
+#define DEFLATE_NEED_CHECKSUM(strm) 1
+#define DEFLATE_DFLTCC_ENABLED() 0
+#endif
/* ===========================================================================
* Function prototypes.
*/
-typedef enum {
- need_more, /* block not completed, need more input or more output */
- block_done, /* block flush performed */
- finish_started, /* finish started, need only more output at next deflate */
- finish_done /* finish done, accept no more input or output */
-} block_state;
typedef block_state (*compress_func) (deflate_state *s, int flush);
/* Compression function. Returns the block state after the call. */
@@ -72,7 +75,6 @@ static block_state deflate_fast (deflate_state *s, int flush);
static block_state deflate_slow (deflate_state *s, int flush);
static void lm_init (deflate_state *s);
static void putShortMSB (deflate_state *s, uInt b);
-static void flush_pending (z_streamp strm);
static int read_buf (z_streamp strm, Byte *buf, unsigned size);
static uInt longest_match (deflate_state *s, IPos cur_match);
@@ -98,6 +100,25 @@ static void check_match (deflate_state *s, IPos start, IPos match,
* See deflate.c for comments about the MIN_MATCH+1.
*/
+/* Workspace to be allocated for deflate processing */
+typedef struct deflate_workspace {
+ /* State memory for the deflator */
+ deflate_state deflate_memory;
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* State memory for s390 hardware deflate */
+ struct dfltcc_deflate_state dfltcc_memory;
+#endif
+ Byte *window_memory;
+ Pos *prev_memory;
+ Pos *head_memory;
+ char *overlay_memory;
+} deflate_workspace;
+
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct deflate_workspace, dfltcc_memory) % 8 == 0);
+#endif
+
/* Values for max_lazy_match, good_match and max_chain_length, depending on
* the desired pack level (0..9). The values given below have been tuned to
* exclude worst case performance for pathological files. Better values may be
@@ -207,7 +228,15 @@ int zlib_deflateInit2(
*/
next = (char *) mem;
next += sizeof(*mem);
+#ifdef CONFIG_ZLIB_DFLTCC
+ /*
+ * DFLTCC requires the window to be page aligned.
+ * Thus, we overallocate and take the aligned portion of the buffer.
+ */
+ mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
+#else
mem->window_memory = (Byte *) next;
+#endif
next += zlib_deflate_window_memsize(windowBits);
mem->prev_memory = (Pos *) next;
next += zlib_deflate_prev_memsize(windowBits);
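A short note on the alignment trick above (illustrative only): rounding the window start up to a page boundary consumes at most PAGE_SIZE - 1 bytes, which is exactly the slack the DFLTCC variant of zlib_deflate_window_memsize() reserves in the defutil.h hunk further down:

	Byte *window = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
	/* window - next < PAGE_SIZE, so the aligned window still fits inside
	 * the overallocated workspace.
	 */
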
@@ -277,6 +306,8 @@ int zlib_deflateReset(
zlib_tr_init(s);
lm_init(s);
+ DEFLATE_RESET_HOOK(strm);
+
return Z_OK;
}
@@ -294,35 +325,6 @@ static void putShortMSB(
put_byte(s, (Byte)(b & 0xff));
}
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-static void flush_pending(
- z_streamp strm
-)
-{
- deflate_state *s = (deflate_state *) strm->state;
- unsigned len = s->pending;
-
- if (len > strm->avail_out) len = strm->avail_out;
- if (len == 0) return;
-
- if (strm->next_out != NULL) {
- memcpy(strm->next_out, s->pending_out, len);
- strm->next_out += len;
- }
- s->pending_out += len;
- strm->total_out += len;
- strm->avail_out -= len;
- s->pending -= len;
- if (s->pending == 0) {
- s->pending_out = s->pending_buf;
- }
-}
-
/* ========================================================================= */
int zlib_deflate(
z_streamp strm,
@@ -404,7 +406,8 @@ int zlib_deflate(
(flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
block_state bstate;
- bstate = (*(configuration_table[s->level].func))(s, flush);
+ bstate = DEFLATE_HOOK(strm, flush, &bstate) ? bstate :
+ (*(configuration_table[s->level].func))(s, flush);
if (bstate == finish_started || bstate == finish_done) {
s->status = FINISH_STATE;
@@ -448,17 +451,24 @@ int zlib_deflate(
Assert(strm->avail_out > 0, "bug2");
if (flush != Z_FINISH) return Z_OK;
- if (s->noheader) return Z_STREAM_END;
- /* Write the zlib trailer (adler32) */
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ if (!s->noheader) {
+ /* Write zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
flush_pending(strm);
/* If avail_out is zero, the application will call deflate again
* to flush the rest.
*/
- s->noheader = -1; /* write the trailer only once! */
- return s->pending != 0 ? Z_OK : Z_STREAM_END;
+ if (!s->noheader) {
+ s->noheader = -1; /* write the trailer only once! */
+ }
+ if (s->pending == 0) {
+ Assert(s->bi_valid == 0, "bi_buf not flushed");
+ return Z_STREAM_END;
+ }
+ return Z_OK;
}
/* ========================================================================= */
@@ -503,7 +513,8 @@ static int read_buf(
strm->avail_in -= len;
- if (!((deflate_state *)(strm->state))->noheader) {
+ if (!DEFLATE_NEED_CHECKSUM(strm)) {}
+ else if (!((deflate_state *)(strm->state))->noheader) {
strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
}
memcpy(buf, strm->next_in, len);
@@ -1135,3 +1146,8 @@ int zlib_deflate_workspacesize(int windowBits, int memLevel)
+ zlib_deflate_head_memsize(memLevel)
+ zlib_deflate_overlay_memsize(memLevel);
}
+
+int zlib_deflate_dfltcc_enabled(void)
+{
+ return DEFLATE_DFLTCC_ENABLED();
+}
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 72fe4b73be53..24b740b99678 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -12,6 +12,7 @@
#include <linux/zlib.h>
EXPORT_SYMBOL(zlib_deflate_workspacesize);
+EXPORT_SYMBOL(zlib_deflate_dfltcc_enabled);
EXPORT_SYMBOL(zlib_deflate);
EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
index 9b1756b12743..a4a34da512fe 100644
--- a/lib/zlib_deflate/deftree.c
+++ b/lib/zlib_deflate/deftree.c
@@ -76,11 +76,6 @@ static const uch bl_order[BL_CODES]
* probability, to avoid transmitting the lengths for unused bit length codes.
*/
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
/* ===========================================================================
* Local data. These are initialized only once.
*/
@@ -147,7 +142,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
static void compress_block (deflate_state *s, ct_data *ltree,
ct_data *dtree);
static void set_data_type (deflate_state *s);
-static void bi_windup (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
int header);
@@ -170,54 +164,6 @@ static void copy_block (deflate_state *s, char *buf, unsigned len,
*/
/* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-static void send_bits (deflate_state *s, int value, int length);
-
-static void send_bits(
- deflate_state *s,
- int value, /* value to send */
- int length /* number of bits */
-)
-{
- Tracevv((stderr," l %2d v %4x ", length, value));
- Assert(length > 0 && length <= 15, "invalid length");
- s->bits_sent += (ulg)length;
-
- /* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
- * unused bits in value.
- */
- if (s->bi_valid > (int)Buf_size - length) {
- s->bi_buf |= (value << s->bi_valid);
- put_short(s, s->bi_buf);
- s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
- s->bi_valid += length - Buf_size;
- } else {
- s->bi_buf |= value << s->bi_valid;
- s->bi_valid += length;
- }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
- if (s->bi_valid > (int)Buf_size - len) {\
- int val = value;\
- s->bi_buf |= (val << s->bi_valid);\
- put_short(s, s->bi_buf);\
- s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
- s->bi_valid += len - Buf_size;\
- } else {\
- s->bi_buf |= (value) << s->bi_valid;\
- s->bi_valid += len;\
- }\
-}
-#endif /* DEBUG_ZLIB */
-
-/* ===========================================================================
* Initialize the various 'constant' tables. In a multi-threaded environment,
* this function may be called by two threads concurrently, but this is
* harmless since both invocations do exactly the same thing.
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index a8c370897c9f..4ea40f5a279f 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -1,5 +1,7 @@
+#ifndef DEFUTIL_H
+#define DEFUTIL_H
-
+#include <linux/zutil.h>
#define Assert(err, str)
#define Trace(dummy)
@@ -238,17 +240,13 @@ typedef struct deflate_state {
} deflate_state;
-typedef struct deflate_workspace {
- /* State memory for the deflator */
- deflate_state deflate_memory;
- Byte *window_memory;
- Pos *prev_memory;
- Pos *head_memory;
- char *overlay_memory;
-} deflate_workspace;
-
+#ifdef CONFIG_ZLIB_DFLTCC
+#define zlib_deflate_window_memsize(windowBits) \
+ (2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
+#else
#define zlib_deflate_window_memsize(windowBits) \
(2 * (1 << (windowBits)) * sizeof(Byte))
+#endif
#define zlib_deflate_prev_memsize(windowBits) \
((1 << (windowBits)) * sizeof(Pos))
#define zlib_deflate_head_memsize(memLevel) \
@@ -293,6 +291,24 @@ void zlib_tr_stored_type_only (deflate_state *);
}
/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+static inline unsigned bi_reverse(
+ unsigned code, /* the value to invert */
+ int len /* its bit length */
+)
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
+
+/* ===========================================================================
* Flush the bit buffer, keeping at most 7 bits in it.
*/
static inline void bi_flush(deflate_state *s)
@@ -325,3 +341,103 @@ static inline void bi_windup(deflate_state *s)
#endif
}
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+static void send_bits (deflate_state *s, int value, int length);
+
+static void send_bits(
+ deflate_state *s,
+ int value, /* value to send */
+ int length /* number of bits */
+)
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (val << s->bi_valid);\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG_ZLIB */
+
+static inline void zlib_tr_send_bits(
+ deflate_state *s,
+ int value,
+ int length
+)
+{
+ send_bits(s, value, length);
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+static inline void flush_pending(
+ z_streamp strm
+)
+{
+ unsigned len;
+ deflate_state *s = (deflate_state *) strm->state;
+
+ bi_flush(s);
+ len = s->pending;
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ if (strm->next_out != NULL) {
+ memcpy(strm->next_out, s->pending_out, len);
+ strm->next_out += len;
+ }
+ s->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ s->pending -= len;
+ if (s->pending == 0) {
+ s->pending_out = s->pending_buf;
+ }
+}
+#endif /* DEFUTIL_H */
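A worked example for the bi_reverse() helper moved above (illustrative comment, not part of the patch):

	/*
	 * bi_reverse(0x3, 4) == 0xc: 0b0011 reversed over four bits is 0b1100.
	 * send_eobs() in dfltcc_deflate.c (added later in this patch) uses it
	 * to turn the MSB-first end-of-block symbol from the hardware parameter
	 * block into the LSB-first order that send_bits() emits.
	 */
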
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
new file mode 100644
index 000000000000..66e1c96387c4
--- /dev/null
+++ b/lib/zlib_dfltcc/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the code for s390 zlib hardware support.
+#
+
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
+
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
new file mode 100644
index 000000000000..ac6ac9739f5b
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Zlib
+/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include "dfltcc_util.h"
+#include "dfltcc.h"
+
+char *oesc_msg(
+ char *buf,
+ int oesc
+)
+{
+ if (oesc == 0x00)
+ return NULL; /* Successful completion */
+ else {
+#ifdef STATIC
+ return NULL; /* Ignore for pre-boot decompressor */
+#else
+ sprintf(buf, "Operation-Ending-Supplemental Code is 0x%.2X", oesc);
+ return buf;
+#endif
+ }
+}
+
+void dfltcc_reset_state(struct dfltcc_state *dfltcc_state) {
+ /* Initialize available functions */
+ if (is_dfltcc_enabled()) {
+ dfltcc(DFLTCC_QAF, &dfltcc_state->param, NULL, NULL, NULL, NULL, NULL);
+ memmove(&dfltcc_state->af, &dfltcc_state->param, sizeof(dfltcc_state->af));
+ } else
+ memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+
+ /* Initialize parameter block */
+ memset(&dfltcc_state->param, 0, sizeof(dfltcc_state->param));
+ dfltcc_state->param.nt = 1;
+ dfltcc_state->param.ribm = DFLTCC_RIBM;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc.h b/lib/zlib_dfltcc/dfltcc.h
new file mode 100644
index 000000000000..b96232bdd44d
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.h
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_H
+#define DFLTCC_H
+
+#include "../zlib_deflate/defutil.h"
+#include <asm/facility.h>
+#include <asm/setup.h>
+
+/*
+ * Tuning parameters.
+ */
+#define DFLTCC_LEVEL_MASK 0x2 /* DFLTCC compression for level 1 only */
+#define DFLTCC_LEVEL_MASK_DEBUG 0x3fe /* DFLTCC compression for all levels */
+#define DFLTCC_BLOCK_SIZE 1048576
+#define DFLTCC_FIRST_FHT_BLOCK_SIZE 4096
+#define DFLTCC_DHT_MIN_SAMPLE_SIZE 4096
+#define DFLTCC_RIBM 0
+
+#define DFLTCC_FACILITY 151
+
+/*
+ * Parameter Block for Query Available Functions.
+ */
+struct dfltcc_qaf_param {
+ char fns[16];
+ char reserved1[8];
+ char fmts[2];
+ char reserved2[6];
+};
+
+static_assert(sizeof(struct dfltcc_qaf_param) == 32);
+
+#define DFLTCC_FMT0 0
+
+/*
+ * Parameter Block for Generate Dynamic-Huffman Table, Compress and Expand.
+ */
+struct dfltcc_param_v0 {
+ uint16_t pbvn; /* Parameter-Block-Version Number */
+ uint8_t mvn; /* Model-Version Number */
+ uint8_t ribm; /* Reserved for IBM use */
+ unsigned reserved32 : 31;
+ unsigned cf : 1; /* Continuation Flag */
+ uint8_t reserved64[8];
+ unsigned nt : 1; /* New Task */
+ unsigned reserved129 : 1;
+ unsigned cvt : 1; /* Check Value Type */
+ unsigned reserved131 : 1;
+ unsigned htt : 1; /* Huffman-Table Type */
+ unsigned bcf : 1; /* Block-Continuation Flag */
+ unsigned bcc : 1; /* Block Closing Control */
+ unsigned bhf : 1; /* Block Header Final */
+ unsigned reserved136 : 1;
+ unsigned reserved137 : 1;
+ unsigned dhtgc : 1; /* DHT Generation Control */
+ unsigned reserved139 : 5;
+ unsigned reserved144 : 5;
+ unsigned sbb : 3; /* Sub-Byte Boundary */
+ uint8_t oesc; /* Operation-Ending-Supplemental Code */
+ unsigned reserved160 : 12;
+ unsigned ifs : 4; /* Incomplete-Function Status */
+ uint16_t ifl; /* Incomplete-Function Length */
+ uint8_t reserved192[8];
+ uint8_t reserved256[8];
+ uint8_t reserved320[4];
+ uint16_t hl; /* History Length */
+ unsigned reserved368 : 1;
+ uint16_t ho : 15; /* History Offset */
+ uint32_t cv; /* Check Value */
+ unsigned eobs : 15; /* End-of-block Symbol */
+ unsigned reserved431: 1;
+ uint8_t eobl : 4; /* End-of-block Length */
+ unsigned reserved436 : 12;
+ unsigned reserved448 : 4;
+ uint16_t cdhtl : 12; /* Compressed-Dynamic-Huffman Table
+ Length */
+ uint8_t reserved464[6];
+ uint8_t cdht[288];
+ uint8_t reserved[32];
+ uint8_t csb[1152];
+};
+
+static_assert(sizeof(struct dfltcc_param_v0) == 1536);
+
+#define CVT_CRC32 0
+#define CVT_ADLER32 1
+#define HTT_FIXED 0
+#define HTT_DYNAMIC 1
+
+/*
+ * Extension of inflate_state and deflate_state for DFLTCC.
+ */
+struct dfltcc_state {
+ struct dfltcc_param_v0 param; /* Parameter block */
+ struct dfltcc_qaf_param af; /* Available functions */
+ char msg[64]; /* Buffer for strm->msg */
+};
+
+/*
+ * Extension of inflate_state and deflate_state for DFLTCC.
+ */
+struct dfltcc_deflate_state {
+ struct dfltcc_state common; /* Parameter block */
+ uLong level_mask; /* Levels on which to use DFLTCC */
+ uLong block_size; /* New block each X bytes */
+ uLong block_threshold; /* New block after total_in > X */
+ uLong dht_threshold; /* New block only if avail_in >= X */
+};
+
+#define ALIGN_UP(p, size) (__typeof__(p))(((uintptr_t)(p) + ((size) - 1)) & ~((size) - 1))
+/* Resides right after inflate_state or deflate_state */
+#define GET_DFLTCC_STATE(state) ((struct dfltcc_state *)((char *)(state) + ALIGN_UP(sizeof(*state), 8)))
+
+void dfltcc_reset_state(struct dfltcc_state *dfltcc_state);
+
+static inline int is_dfltcc_enabled(void)
+{
+	return (zlib_dfltcc_support != ZLIB_DFLTCC_DISABLED &&
+		test_facility(DFLTCC_FACILITY));
+}
+
+#define DEFLATE_DFLTCC_ENABLED() is_dfltcc_enabled()
+
+#endif /* DFLTCC_H */
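An assumed picture of the layout behind GET_DFLTCC_STATE() (not spelled out here; inferred from the static_assert on dfltcc_memory in the deflate.c hunk above):

	/*
	 *   strm->state --> +---------------------------------+
	 *                   | deflate_state / inflate_state   |
	 *                   +---------------------------------+ <- ALIGN_UP(sizeof(*state), 8)
	 *                   | struct dfltcc_state (param, af) |
	 *                   +---------------------------------+
	 *
	 * The doubleword alignment is required for the DFLTCC instruction's
	 * parameter block.
	 */
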
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
new file mode 100644
index 000000000000..b732b6d9e35d
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_deflate/defutil.h"
+#include "dfltcc_util.h"
+#include "dfltcc_deflate.h"
+#include <asm/setup.h>
+#include <linux/export.h>
+#include <linux/zutil.h>
+
+#define GET_DFLTCC_DEFLATE_STATE(state) ((struct dfltcc_deflate_state *)GET_DFLTCC_STATE(state))
+
+/*
+ * Compress.
+ */
+int dfltcc_can_deflate(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+
+ /* Check for kernel dfltcc command line parameter */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+ zlib_dfltcc_support == ZLIB_DFLTCC_INFLATE_ONLY)
+ return 0;
+
+ /* Unsupported compression settings */
+ if (!dfltcc_are_params_ok(state->level, state->w_bits, state->strategy,
+ dfltcc_state->level_mask))
+ return 0;
+
+ /* Unsupported hardware */
+ if (!is_bit_set(dfltcc_state->common.af.fns, DFLTCC_GDHT) ||
+ !is_bit_set(dfltcc_state->common.af.fns, DFLTCC_CMPR) ||
+ !is_bit_set(dfltcc_state->common.af.fmts, DFLTCC_FMT0))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(dfltcc_can_deflate);
+
+void dfltcc_reset_deflate_state(z_streamp strm) {
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+
+ dfltcc_reset_state(&dfltcc_state->common);
+
+ /* Initialize tuning parameters */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_FULL_DEBUG)
+ dfltcc_state->level_mask = DFLTCC_LEVEL_MASK_DEBUG;
+ else
+ dfltcc_state->level_mask = DFLTCC_LEVEL_MASK;
+ dfltcc_state->block_size = DFLTCC_BLOCK_SIZE;
+ dfltcc_state->block_threshold = DFLTCC_FIRST_FHT_BLOCK_SIZE;
+ dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
+}
+EXPORT_SYMBOL(dfltcc_reset_deflate_state);
+
+static void dfltcc_gdht(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+
+ dfltcc(DFLTCC_GDHT,
+ param, NULL, NULL,
+ &strm->next_in, &avail_in, NULL);
+}
+
+static dfltcc_cc dfltcc_cmpr(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+ size_t avail_out = strm->avail_out;
+ dfltcc_cc cc;
+
+ cc = dfltcc(DFLTCC_CMPR | HBT_CIRCULAR,
+ param, &strm->next_out, &avail_out,
+ &strm->next_in, &avail_in, state->window);
+ strm->total_in += (strm->avail_in - avail_in);
+ strm->total_out += (strm->avail_out - avail_out);
+ strm->avail_in = avail_in;
+ strm->avail_out = avail_out;
+ return cc;
+}
+
+static void send_eobs(
+ z_streamp strm,
+ const struct dfltcc_param_v0 *param
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+
+ zlib_tr_send_bits(
+ state,
+ bi_reverse(param->eobs >> (15 - param->eobl), param->eobl),
+ param->eobl);
+ flush_pending(strm);
+ if (state->pending != 0) {
+ /* The remaining data is located in pending_out[0:pending]. If someone
+ * calls put_byte() - this might happen in deflate() - the byte will be
+ * placed into pending_buf[pending], which is incorrect. Move the
+ * remaining data to the beginning of pending_buf so that put_byte() is
+ * usable again.
+ */
+ memmove(state->pending_buf, state->pending_out, state->pending);
+ state->pending_out = state->pending_buf;
+ }
+#ifdef ZLIB_DEBUG
+ state->compressed_len += param->eobl;
+#endif
+}
+
+int dfltcc_deflate(
+ z_streamp strm,
+ int flush,
+ block_state *result
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+ struct dfltcc_param_v0 *param = &dfltcc_state->common.param;
+ uInt masked_avail_in;
+ dfltcc_cc cc;
+ int need_empty_block;
+ int soft_bcc;
+ int no_flush;
+
+ if (!dfltcc_can_deflate(strm)) {
+ /* Clear history. */
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0;
+ return 0;
+ }
+
+again:
+ masked_avail_in = 0;
+ soft_bcc = 0;
+ no_flush = flush == Z_NO_FLUSH;
+
+ /* No input data. Return, except when Continuation Flag is set, which means
+ * that DFLTCC has buffered some output in the parameter block and needs to
+ * be called again in order to flush it.
+ */
+ if (strm->avail_in == 0 && !param->cf) {
+ /* A block is still open, and the hardware does not support closing
+ * blocks without adding data. Thus, close it manually.
+ */
+ if (!no_flush && param->bcf) {
+ send_eobs(strm, param);
+ param->bcf = 0;
+ }
+ /* Let one of deflate_* functions write a trailing empty block. */
+ if (flush == Z_FINISH)
+ return 0;
+ /* Clear history. */
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0;
+ /* Trigger block post-processing if necessary. */
+ *result = no_flush ? need_more : block_done;
+ return 1;
+ }
+
+ /* There is an open non-BFINAL block, we are not going to close it just
+ * yet, we have compressed more than DFLTCC_BLOCK_SIZE bytes and we see
+ * more than DFLTCC_DHT_MIN_SAMPLE_SIZE bytes. Open a new block with a new
+ * DHT in order to adapt to a possibly changed input data distribution.
+ */
+ if (param->bcf && no_flush &&
+ strm->total_in > dfltcc_state->block_threshold &&
+ strm->avail_in >= dfltcc_state->dht_threshold) {
+ if (param->cf) {
+ /* We need to flush the DFLTCC buffer before writing the
+ * End-of-block Symbol. Mask the input data and proceed as usual.
+ */
+ masked_avail_in += strm->avail_in;
+ strm->avail_in = 0;
+ no_flush = 0;
+ } else {
+ /* DFLTCC buffer is empty, so we can manually write the
+ * End-of-block Symbol right away.
+ */
+ send_eobs(strm, param);
+ param->bcf = 0;
+ dfltcc_state->block_threshold =
+ strm->total_in + dfltcc_state->block_size;
+ }
+ }
+
+ /* No space for compressed data. If we proceed, dfltcc_cmpr() will return
+ * DFLTCC_CC_OP1_TOO_SHORT without buffering header bits, but we will still
+ * set BCF=1, which is wrong. Avoid complications and return early.
+ */
+ if (strm->avail_out == 0) {
+ *result = need_more;
+ return 1;
+ }
+
+ /* The caller gave us too much data. Pass only one block worth of
+ * uncompressed data to DFLTCC and mask the rest, so that on the next
+ * iteration we start a new block.
+ */
+ if (no_flush && strm->avail_in > dfltcc_state->block_size) {
+ masked_avail_in += (strm->avail_in - dfltcc_state->block_size);
+ strm->avail_in = dfltcc_state->block_size;
+ }
+
+ /* When we have an open non-BFINAL deflate block and caller indicates that
+ * the stream is ending, we need to close an open deflate block and open a
+ * BFINAL one.
+ */
+ need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;
+
+ /* Translate stream to parameter block */
+ param->cvt = CVT_ADLER32;
+ if (!no_flush)
+ /* We need to close a block. Always do this in software - when there is
+ * no input data, the hardware will not honor BCC. */
+ soft_bcc = 1;
+ if (flush == Z_FINISH && !param->bcf)
+ /* We are about to open a BFINAL block, set Block Header Final bit
+ * until the stream ends.
+ */
+ param->bhf = 1;
+ /* DFLTCC-CMPR will write to next_out, so make sure that buffers with
+ * higher precedence are empty.
+ */
+ Assert(state->pending == 0, "There must be no pending bytes");
+ Assert(state->bi_valid < 8, "There must be less than 8 pending bits");
+ param->sbb = (unsigned int)state->bi_valid;
+ if (param->sbb > 0)
+ *strm->next_out = (Byte)state->bi_buf;
+ /* Honor history and check value */
+ param->nt = 0;
+ param->cv = strm->adler;
+
+ /* When opening a block, choose a Huffman-Table Type */
+ if (!param->bcf) {
+ if (strm->total_in == 0 && dfltcc_state->block_threshold > 0) {
+ param->htt = HTT_FIXED;
+ }
+ else {
+ param->htt = HTT_DYNAMIC;
+ dfltcc_gdht(strm);
+ }
+ }
+
+ /* Deflate */
+ do {
+ cc = dfltcc_cmpr(strm);
+ if (strm->avail_in < 4096 && masked_avail_in > 0)
+ /* We are about to call DFLTCC with a small input buffer, which is
+ * inefficient. Since there is masked data, there will be at least
+ * one more DFLTCC call, so skip the current one and make the next
+ * one handle more data.
+ */
+ break;
+ } while (cc == DFLTCC_CC_AGAIN);
+
+ /* Translate parameter block to stream */
+ strm->msg = oesc_msg(dfltcc_state->common.msg, param->oesc);
+ state->bi_valid = param->sbb;
+ if (state->bi_valid == 0)
+ state->bi_buf = 0; /* Avoid accessing next_out */
+ else
+ state->bi_buf = *strm->next_out & ((1 << state->bi_valid) - 1);
+ strm->adler = param->cv;
+
+ /* Unmask the input data */
+ strm->avail_in += masked_avail_in;
+ masked_avail_in = 0;
+
+ /* If we encounter an error, it means there is a bug in the DFLTCC call */
+ Assert(cc != DFLTCC_CC_OP2_CORRUPT || param->oesc == 0, "BUG");
+
+ /* Update Block-Continuation Flag. It will be used to check whether to call
+ * GDHT the next time.
+ */
+ if (cc == DFLTCC_CC_OK) {
+ if (soft_bcc) {
+ send_eobs(strm, param);
+ param->bcf = 0;
+ dfltcc_state->block_threshold =
+ strm->total_in + dfltcc_state->block_size;
+ } else
+ param->bcf = 1;
+ if (flush == Z_FINISH) {
+ if (need_empty_block)
+ /* Make the current deflate() call also close the stream */
+ return 0;
+ else {
+ bi_windup(state);
+ *result = finish_done;
+ }
+ } else {
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0; /* Clear history */
+ *result = flush == Z_NO_FLUSH ? need_more : block_done;
+ }
+ } else {
+ param->bcf = 1;
+ *result = need_more;
+ }
+ if (strm->avail_in != 0 && strm->avail_out != 0)
+ goto again; /* deflate() must use all input or all output */
+ return 1;
+}
+EXPORT_SYMBOL(dfltcc_deflate);
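A minimal sketch of the bit manipulation performed by send_eobs() above: DFLTCC reports the End-of-Block Symbol left-aligned in a 15-bit field (eobs) together with its length (eobl), while zlib's bit writer emits codes LSB-first, so the code is first right-aligned and then bit-reversed before being handed to zlib_tr_send_bits(). bit_reverse() is a local stand-in for zlib's bi_reverse(), and the 3-bit code is a hypothetical value, not real DFLTCC output.

#include <stdio.h>

static unsigned bit_reverse(unsigned code, int len)
{
	unsigned res = 0;

	while (len-- > 0) {
		res = (res << 1) | (code & 1);
		code >>= 1;
	}
	return res;
}

int main(void)
{
	unsigned eobs = 0x6000;              /* hypothetical: code 0b110, left-aligned in 15 bits */
	int eobl = 3;                        /* ... with a 3-bit length */
	unsigned code = eobs >> (15 - eobl); /* right-align the code */

	printf("send 0x%x in %d bits\n", bit_reverse(code, eobl), eobl); /* 0x3 */
	return 0;
}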
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.h b/lib/zlib_dfltcc/dfltcc_deflate.h
new file mode 100644
index 000000000000..be44b43833b1
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_deflate.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_DEFLATE_H
+#define DFLTCC_DEFLATE_H
+
+#include "dfltcc.h"
+
+/* External functions */
+int dfltcc_can_deflate(z_streamp strm);
+int dfltcc_deflate(z_streamp strm,
+ int flush,
+ block_state *result);
+void dfltcc_reset_deflate_state(z_streamp strm);
+
+#define DEFLATE_RESET_HOOK(strm) \
+ dfltcc_reset_deflate_state((strm))
+
+#define DEFLATE_HOOK dfltcc_deflate
+
+#define DEFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_deflate((strm)))
+
+#endif /* DFLTCC_DEFLATE_H */
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
new file mode 100644
index 000000000000..437cd34c8490
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_inflate/inflate.h"
+#include "dfltcc_util.h"
+#include "dfltcc_inflate.h"
+#include <asm/setup.h>
+#include <linux/export.h>
+#include <linux/zutil.h>
+
+/*
+ * Expand.
+ */
+int dfltcc_can_inflate(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ /* Check for kernel dfltcc command line parameter */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+ zlib_dfltcc_support == ZLIB_DFLTCC_DEFLATE_ONLY)
+ return 0;
+
+ /* Unsupported hardware */
+ return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
+ is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
+}
+EXPORT_SYMBOL(dfltcc_can_inflate);
+
+void dfltcc_reset_inflate_state(z_streamp strm) {
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ dfltcc_reset_state(dfltcc_state);
+}
+EXPORT_SYMBOL(dfltcc_reset_inflate_state);
+
+static int dfltcc_was_inflate_used(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+
+ return !param->nt;
+}
+
+static int dfltcc_inflate_disable(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ if (!dfltcc_can_inflate(strm))
+ return 0;
+ if (dfltcc_was_inflate_used(strm))
+ /* DFLTCC has already decompressed some data. Since there is not
+ * enough information to resume decompression in software, the call
+ * must fail.
+ */
+ return 1;
+ /* DFLTCC was not used yet - decompress in software */
+ memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+ return 0;
+}
+
+static dfltcc_cc dfltcc_xpnd(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+ size_t avail_out = strm->avail_out;
+ dfltcc_cc cc;
+
+ cc = dfltcc(DFLTCC_XPND | HBT_CIRCULAR,
+ param, &strm->next_out, &avail_out,
+ &strm->next_in, &avail_in, state->window);
+ strm->avail_in = avail_in;
+ strm->avail_out = avail_out;
+ return cc;
+}
+
+dfltcc_inflate_action dfltcc_inflate(
+ z_streamp strm,
+ int flush,
+ int *ret
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+ struct dfltcc_param_v0 *param = &dfltcc_state->param;
+ dfltcc_cc cc;
+
+ if (flush == Z_BLOCK || flush == Z_PACKET_FLUSH) {
+ /* DFLTCC does not support stopping on block boundaries (the Z_BLOCK flush
+ * option), nor the Z_PACKET_FLUSH option (used exclusively by the PPP driver).
+ */
+ if (dfltcc_inflate_disable(strm)) {
+ *ret = Z_STREAM_ERROR;
+ return DFLTCC_INFLATE_BREAK;
+ } else
+ return DFLTCC_INFLATE_SOFTWARE;
+ }
+
+ if (state->last) {
+ if (state->bits != 0) {
+ strm->next_in++;
+ strm->avail_in--;
+ state->bits = 0;
+ }
+ state->mode = CHECK;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+
+ if (strm->avail_in == 0 && !param->cf)
+ return DFLTCC_INFLATE_BREAK;
+
+ if (!state->window || state->wsize == 0) {
+ state->mode = MEM;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+
+ /* Translate stream to parameter block */
+ param->cvt = CVT_ADLER32;
+ param->sbb = state->bits;
+ if (param->hl)
+ param->nt = 0; /* Honor history for the first block */
+ param->cv = state->check;
+
+ /* Inflate */
+ do {
+ cc = dfltcc_xpnd(strm);
+ } while (cc == DFLTCC_CC_AGAIN);
+
+ /* Translate parameter block to stream */
+ strm->msg = oesc_msg(dfltcc_state->msg, param->oesc);
+ state->last = cc == DFLTCC_CC_OK;
+ state->bits = param->sbb;
+ state->check = param->cv;
+ if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
+ /* Report an error if stream is corrupted */
+ state->mode = BAD;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+ state->mode = TYPEDO;
+ /* Break if operands are exhausted, otherwise continue looping */
+ return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
+ DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
+}
+EXPORT_SYMBOL(dfltcc_inflate);
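A self-contained model (not the kernel code) of the three-way result that dfltcc_inflate() returns to the generic inflate loop: CONTINUE re-enters the state machine with whatever mode was set above, BREAK leaves inflate() because the operands are exhausted or an error was recorded, and SOFTWARE falls back to the C decompressor when the flush mode cannot be honored.

#include <stdio.h>

typedef enum {
	INFLATE_CONTINUE,  /* re-enter the state machine (mode already updated) */
	INFLATE_BREAK,     /* operands exhausted or hard error: leave inflate() */
	INFLATE_SOFTWARE,  /* flush mode not supported: use the C path */
} action_t;

static const char *handle(action_t a)
{
	switch (a) {
	case INFLATE_CONTINUE: return "continue the state machine";
	case INFLATE_BREAK:    return "goto inf_leave";
	default:               return "fall back to software inflate";
	}
}

int main(void)
{
	printf("%s\n", handle(INFLATE_BREAK));
	return 0;
}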
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.h b/lib/zlib_dfltcc/dfltcc_inflate.h
new file mode 100644
index 000000000000..98d4bc42e526
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_inflate.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_INFLATE_H
+#define DFLTCC_INFLATE_H
+
+#include "dfltcc.h"
+
+/* External functions */
+void dfltcc_reset_inflate_state(z_streamp strm);
+int dfltcc_can_inflate(z_streamp strm);
+typedef enum {
+ DFLTCC_INFLATE_CONTINUE,
+ DFLTCC_INFLATE_BREAK,
+ DFLTCC_INFLATE_SOFTWARE,
+} dfltcc_inflate_action;
+dfltcc_inflate_action dfltcc_inflate(z_streamp strm,
+ int flush, int *ret);
+#define INFLATE_RESET_HOOK(strm) \
+ dfltcc_reset_inflate_state((strm))
+
+#define INFLATE_TYPEDO_HOOK(strm, flush) \
+ if (dfltcc_can_inflate((strm))) { \
+ dfltcc_inflate_action action; \
+\
+ RESTORE(); \
+ action = dfltcc_inflate((strm), (flush), &ret); \
+ LOAD(); \
+ if (action == DFLTCC_INFLATE_CONTINUE) \
+ break; \
+ else if (action == DFLTCC_INFLATE_BREAK) \
+ goto inf_leave; \
+ }
+
+#define INFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_inflate((strm)))
+
+#define INFLATE_NEED_UPDATEWINDOW(strm) (!dfltcc_can_inflate((strm)))
+
+#endif /* DFLTCC_INFLATE_H */
diff --git a/lib/zlib_dfltcc/dfltcc_util.h b/lib/zlib_dfltcc/dfltcc_util.h
new file mode 100644
index 000000000000..4a46b5009f0d
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_util.h
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_UTIL_H
+#define DFLTCC_UTIL_H
+
+#include <linux/zutil.h>
+
+/*
+ * C wrapper for the DEFLATE CONVERSION CALL instruction.
+ */
+typedef enum {
+ DFLTCC_CC_OK = 0,
+ DFLTCC_CC_OP1_TOO_SHORT = 1,
+ DFLTCC_CC_OP2_TOO_SHORT = 2,
+ DFLTCC_CC_OP2_CORRUPT = 2,
+ DFLTCC_CC_AGAIN = 3,
+} dfltcc_cc;
+
+#define DFLTCC_QAF 0
+#define DFLTCC_GDHT 1
+#define DFLTCC_CMPR 2
+#define DFLTCC_XPND 4
+#define HBT_CIRCULAR (1 << 7)
+#define HB_BITS 15
+#define HB_SIZE (1 << HB_BITS)
+
+static inline dfltcc_cc dfltcc(
+ int fn,
+ void *param,
+ Byte **op1,
+ size_t *len1,
+ const Byte **op2,
+ size_t *len2,
+ void *hist
+)
+{
+ Byte *t2 = op1 ? *op1 : NULL;
+ size_t t3 = len1 ? *len1 : 0;
+ const Byte *t4 = op2 ? *op2 : NULL;
+ size_t t5 = len2 ? *len2 : 0;
+ register int r0 __asm__("r0") = fn;
+ register void *r1 __asm__("r1") = param;
+ register Byte *r2 __asm__("r2") = t2;
+ register size_t r3 __asm__("r3") = t3;
+ register const Byte *r4 __asm__("r4") = t4;
+ register size_t r5 __asm__("r5") = t5;
+ int cc;
+
+ __asm__ volatile(
+ ".insn rrf,0xb9390000,%[r2],%[r4],%[hist],0\n"
+ "ipm %[cc]\n"
+ : [r2] "+r" (r2)
+ , [r3] "+r" (r3)
+ , [r4] "+r" (r4)
+ , [r5] "+r" (r5)
+ , [cc] "=r" (cc)
+ : [r0] "r" (r0)
+ , [r1] "r" (r1)
+ , [hist] "r" (hist)
+ : "cc", "memory");
+ t2 = r2; t3 = r3; t4 = r4; t5 = r5;
+
+ if (op1)
+ *op1 = t2;
+ if (len1)
+ *len1 = t3;
+ if (op2)
+ *op2 = t4;
+ if (len2)
+ *len2 = t5;
+ return (cc >> 28) & 3;
+}
+
+static inline int is_bit_set(
+ const char *bits,
+ int n
+)
+{
+ return bits[n / 8] & (1 << (7 - (n % 8)));
+}
+
+static inline void turn_bit_off(
+ char *bits,
+ int n
+)
+{
+ bits[n / 8] &= ~(1 << (7 - (n % 8)));
+}
+
+static inline int dfltcc_are_params_ok(
+ int level,
+ uInt window_bits,
+ int strategy,
+ uLong level_mask
+)
+{
+ return (level_mask & (1 << level)) != 0 &&
+ (window_bits == HB_BITS) &&
+ (strategy == Z_DEFAULT_STRATEGY);
+}
+
+char *oesc_msg(char *buf, int oesc);
+
+#endif /* DFLTCC_UTIL_H */
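A minimal sketch of the bit-numbering convention behind is_bit_set() and turn_bit_off(): within each byte of the Query-Available-Functions vector, bit 0 is the most significant bit, matching the s390 facility-list convention. The 0xe8 byte below is a hypothetical facility list, not real QAF output.

#include <stdio.h>

static int is_bit_set(const char *bits, int n)
{
	return bits[n / 8] & (1 << (7 - (n % 8)));
}

static void turn_bit_off(char *bits, int n)
{
	bits[n / 8] &= ~(1 << (7 - (n % 8)));
}

int main(void)
{
	char fns[16] = { (char)0xe8 };   /* hypothetical: bits 0, 1, 2 and 4 set */

	printf("DFLTCC_XPND (bit 4): %d\n", !!is_bit_set(fns, 4)); /* 1 */
	printf("DFLTCC_CMPR (bit 2): %d\n", !!is_bit_set(fns, 2)); /* 1 */
	turn_bit_off(fns, 4);
	printf("DFLTCC_XPND (bit 4): %d\n", !!is_bit_set(fns, 4)); /* 0 */
	return 0;
}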
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 2c13ecc5bb2c..2843f9bb42ac 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -10,23 +10,12 @@
#ifndef ASMINF
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
union uu {
unsigned short us;
unsigned char b[2];
};
-/* Endian independed version */
+/* Endian independent version */
static inline unsigned short
get_unaligned16(const unsigned short *p)
{
@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p)
return mm.us;
}
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
-
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start)
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
+ in = strm->next_in;
last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
+ out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start)
input data or output space */
do {
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
+ *out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
}
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start)
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start)
state->mode = BAD;
break;
}
- from = window - OFF;
+ from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from end of window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
- from = window - OFF;
+ from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
len -= 3;
}
if (len) {
- PUP(out) = PUP(from);
+ *out++ = *from++;
if (len > 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else {
@@ -264,29 +243,28 @@ void inflate_fast(z_streamp strm, unsigned start)
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
len--;
}
- sout = (unsigned short *)(out - OFF);
+ sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
- sfrom = (unsigned short *)(from - OFF);
+ sfrom = (unsigned short *)(from);
loops = len >> 1;
- do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
-#else
- PUP(sout) = UP_UNALIGNED(sfrom);
-#endif
- while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
+ do {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ *sout++ = *sfrom++;
+ else
+ *sout++ = get_unaligned16(sfrom++);
+ } while (--loops);
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
- pat16 = *(sout-1+OFF);
+ pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
@@ -296,12 +274,12 @@ void inflate_fast(z_streamp strm, unsigned start)
}
loops = len >> 1;
do
- PUP(sout) = pat16;
+ *sout++ = pat16;
while (--loops);
- out = (unsigned char *)sout + OFF;
+ out = (unsigned char *)sout;
}
if (len & 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
@@ -336,8 +314,8 @@ void inflate_fast(z_streamp strm, unsigned start)
hold &= (1U << bits) - 1;
/* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
+ strm->next_in = in;
+ strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 48f14cd58c77..d1efad69f02b 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -15,6 +15,16 @@
#include "inffast.h"
#include "infutil.h"
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+# include "../zlib_dfltcc/dfltcc_inflate.h"
+#else
+#define INFLATE_RESET_HOOK(strm) do {} while (0)
+#define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
+#define INFLATE_NEED_UPDATEWINDOW(strm) 1
+#define INFLATE_NEED_CHECKSUM(strm) 1
+#endif
+
int zlib_inflate_workspacesize(void)
{
return sizeof(struct inflate_workspace);
@@ -42,6 +52,7 @@ int zlib_inflateReset(z_streamp strm)
state->write = 0;
state->whave = 0;
+ INFLATE_RESET_HOOK(strm);
return Z_OK;
}
@@ -66,7 +77,15 @@ int zlib_inflateInit2(z_streamp strm, int windowBits)
return Z_STREAM_ERROR;
}
state->wbits = (unsigned)windowBits;
+#ifdef CONFIG_ZLIB_DFLTCC
+ /*
+ * DFLTCC requires the window to be page aligned.
+ * Thus, we overallocate and take the aligned portion of the buffer.
+ */
+ state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
+#else
state->window = &WS(strm)->working_window[0];
+#endif
return zlib_inflateReset(strm);
}
@@ -227,11 +246,6 @@ static int zlib_inflateSyncPacket(z_streamp strm)
bits -= bits & 7; \
} while (0)
-/* Reverse the bytes in a 32-bit value */
-#define REVERSE(q) \
- ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
- (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
-
/*
inflate() uses a state machine to process as much input data and generate as
much output data as possible before returning. The state machine is
@@ -382,7 +396,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
- /* fall through */
+ fallthrough;
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -390,11 +404,12 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
- /* fall through */
+ fallthrough;
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
- /* fall through */
+ fallthrough;
case TYPEDO:
+ INFLATE_TYPEDO_HOOK(strm, flush);
if (state->last) {
BYTEBITS();
state->mode = CHECK;
@@ -431,7 +446,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
- /* fall through */
+ fallthrough;
case COPY:
copy = state->length;
if (copy) {
@@ -465,7 +480,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
- /* fall through */
+ fallthrough;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -486,7 +501,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
- /* fall through */
+ fallthrough;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -560,7 +575,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
- /* fall through */
+ fallthrough;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -600,7 +615,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
- /* fall through */
+ fallthrough;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -608,7 +623,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
- /* fall through */
+ fallthrough;
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -634,7 +649,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
- /* fall through */
+ fallthrough;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -654,7 +669,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
- /* fall through */
+ fallthrough;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -692,7 +707,7 @@ int zlib_inflate(z_streamp strm, int flush)
out -= left;
strm->total_out += out;
state->total += out;
- if (out)
+ if (INFLATE_NEED_CHECKSUM(strm) && out)
strm->adler = state->check =
UPDATE(state->check, put - out, out);
out = left;
@@ -705,7 +720,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
- /* fall through */
+ fallthrough;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
@@ -726,7 +741,8 @@ int zlib_inflate(z_streamp strm, int flush)
*/
inf_leave:
RESTORE();
- if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
+ if (INFLATE_NEED_UPDATEWINDOW(strm) &&
+ (state->wsize || (state->mode < CHECK && out != strm->avail_out)))
zlib_updatewindow(strm, out);
in -= strm->avail_in;
@@ -734,7 +750,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->total_in += in;
strm->total_out += out;
state->total += out;
- if (state->wrap && out)
+ if (INFLATE_NEED_CHECKSUM(strm) && state->wrap && out)
strm->adler = state->check =
UPDATE(state->check, strm->next_out - out, out);
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index 3d17b3d1b21f..f79337ddf98c 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -11,6 +11,8 @@
subject to change. Applications should only use zlib.h.
*/
+#include "inftrees.h"
+
/* Possible inflate modes between inflate() calls */
typedef enum {
HEAD, /* i: waiting for magic header */
@@ -108,4 +110,10 @@ struct inflate_state {
unsigned short work[288]; /* work area for code table building */
code codes[ENOUGH]; /* space for code tables */
};
+
+/* Reverse the bytes in a 32-bit value */
+#define REVERSE(q) \
+ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
+
#endif
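A quick worked example of the REVERSE() macro now shared through inflate.h: it byte-swaps a 32-bit quantity, which inflate() uses to turn the big-endian check value accumulated in hold into host order. Illustration only; the macro is copied verbatim from the header above.

#include <stdio.h>

#define REVERSE(q) \
	((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
	 (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))

int main(void)
{
	unsigned long hold = 0x11223344UL;

	printf("0x%08lx\n", (unsigned long)REVERSE(hold)); /* 0x44332211 */
	return 0;
}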
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h
index eb1a9007bd86..784ab33b7842 100644
--- a/lib/zlib_inflate/infutil.h
+++ b/lib/zlib_inflate/infutil.h
@@ -12,14 +12,28 @@
#define _INFUTIL_H
#include <linux/zlib.h>
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "../zlib_dfltcc/dfltcc.h"
+#include <asm/page.h>
+#endif
/* memory allocation for inflation */
struct inflate_workspace {
struct inflate_state inflate_state;
- unsigned char working_window[1 << MAX_WBITS];
+#ifdef CONFIG_ZLIB_DFLTCC
+ struct dfltcc_state dfltcc_state;
+ unsigned char working_window[(1 << MAX_WBITS) + PAGE_SIZE];
+#else
+ unsigned char working_window[(1 << MAX_WBITS)];
+#endif
};
-#define WS(z) ((struct inflate_workspace *)(z->workspace))
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct inflate_workspace, dfltcc_state) % 8 == 0);
+#endif
+
+#define WS(strm) ((struct inflate_workspace *)(strm->workspace))
#endif
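A minimal user-space sketch of why working_window grows by PAGE_SIZE under CONFIG_ZLIB_DFLTCC: the hardware wants a page-aligned window, so the buffer is over-allocated by one page and only the aligned portion is used. ptr_align() and the 4096-byte page below are stand-ins for the kernel's PTR_ALIGN() and PAGE_SIZE, and the 32 KiB window size corresponds to MAX_WBITS == 15.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL

static void *ptr_align(void *p, unsigned long align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(uintptr_t)(align - 1));
}

int main(void)
{
	/* One extra page guarantees that a page-aligned 32 KiB window fits. */
	unsigned char *raw = malloc((1UL << 15) + PAGE_SZ);
	unsigned char *window;

	if (!raw)
		return 1;
	window = ptr_align(raw, PAGE_SZ);
	printf("raw=%p window=%p\n", (void *)raw, (void *)window);
	free(raw);
	return 0;
}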
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index f5d778e7e5c7..20f08c644b71 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -1,10 +1,43 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+# ################################################################
+# Copyright (c) Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
+obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o
-ccflags-y += -O3
+zstd_compress-y := \
+ zstd_compress_module.o \
+ compress/fse_compress.o \
+ compress/hist.o \
+ compress/huf_compress.o \
+ compress/zstd_compress.o \
+ compress/zstd_compress_literals.o \
+ compress/zstd_compress_sequences.o \
+ compress/zstd_compress_superblock.o \
+ compress/zstd_double_fast.o \
+ compress/zstd_fast.o \
+ compress/zstd_lazy.o \
+ compress/zstd_ldm.o \
+ compress/zstd_opt.o \
-zstd_compress-y := fse_compress.o huf_compress.o compress.o \
- entropy_common.o fse_decompress.o zstd_common.o
-zstd_decompress-y := huf_decompress.o decompress.o \
- entropy_common.o fse_decompress.o zstd_common.o
+zstd_decompress-y := \
+ zstd_decompress_module.o \
+ decompress/huf_decompress.o \
+ decompress/zstd_ddict.o \
+ decompress/zstd_decompress.o \
+ decompress/zstd_decompress_block.o \
+
+zstd_common-y := \
+ zstd_common_module.o \
+ common/debug.o \
+ common/entropy_common.o \
+ common/error_private.o \
+ common/fse_decompress.o \
+ common/zstd_common.o \
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
deleted file mode 100644
index 3a49784d5c61..000000000000
--- a/lib/zstd/bitstream.h
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * bitstream
- * Part of FSE library
- * header file (to include)
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-/*
-* This API consists of small unitary functions, which must be inlined for best performance.
-* Since link-time-optimization is not available for all compilers,
-* these functions are defined into a .h to be included.
-*/
-
-/*-****************************************
-* Dependencies
-******************************************/
-#include "error_private.h" /* error codes and messages */
-#include "mem.h" /* unaligned access routines */
-
-/*=========================================
-* Target specific
-=========================================*/
-#define STREAM_ACCUMULATOR_MIN_32 25
-#define STREAM_ACCUMULATOR_MIN_64 57
-#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
-
-/*-******************************************
-* bitStream encoding API (write forward)
-********************************************/
-/* bitStream can mix input from multiple sources.
-* A critical property of these streams is that they encode and decode in **reverse** direction.
-* So the first bit sequence you add will be the last to be read, like a LIFO stack.
-*/
-typedef struct {
- size_t bitContainer;
- int bitPos;
- char *startPtr;
- char *ptr;
- char *endPtr;
-} BIT_CStream_t;
-
-ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
-ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
-ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
-ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
-
-/* Start with initCStream, providing the size of buffer to write into.
-* bitStream will never write outside of this buffer.
-* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
-*
-* bits are first added to a local register.
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
-* Writing data into memory is an explicit operation, performed by the flushBits function.
-* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
-* After a flushBits, a maximum of 7 bits might still be stored into local register.
-*
-* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
-*
-* Last operation is to close the bitStream.
-* The function returns the final size of CStream in bytes.
-* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
-*/
-
-/*-********************************************
-* bitStream decoding API (read backward)
-**********************************************/
-typedef struct {
- size_t bitContainer;
- unsigned bitsConsumed;
- const char *ptr;
- const char *start;
-} BIT_DStream_t;
-
-typedef enum {
- BIT_DStream_unfinished = 0,
- BIT_DStream_endOfBuffer = 1,
- BIT_DStream_completed = 2,
- BIT_DStream_overflow = 3
-} BIT_DStream_status; /* result of BIT_reloadDStream() */
-/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
-ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
-ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
-ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
-
-/* Start by invoking BIT_initDStream().
-* A chunk of the bitStream is then stored into a local register.
-* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-* You can then retrieve bitFields stored into the local register, **in reverse order**.
-* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
-* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
-* Otherwise, it can be less than that, so proceed accordingly.
-* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
-*/
-
-/*-****************************************
-* unsafe API
-******************************************/
-ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
-/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
-
-ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
-/* unsafe version; does not check buffer overflow */
-
-ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-/*-**************************************************************
-* Internal functions
-****************************************************************/
-ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
-
-/*===== Local Constants =====*/
-static const unsigned BIT_mask[] = {0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
- 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
- 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
-
-/*-**************************************************************
-* bitStream encoding
-****************************************************************/
-/*! BIT_initCStream() :
- * `dstCapacity` must be > sizeof(void*)
- * @return : 0 if success,
- otherwise an error code (can be tested using ERR_isError() ) */
-ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
-{
- bitC->bitContainer = 0;
- bitC->bitPos = 0;
- bitC->startPtr = (char *)startPtr;
- bitC->ptr = bitC->startPtr;
- bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
- if (dstCapacity <= sizeof(bitC->ptr))
- return ERROR(dstSize_tooSmall);
- return 0;
-}
-
-/*! BIT_addBits() :
- can add up to 26 bits into `bitC`.
- Does not check for register overflow ! */
-ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
-{
- bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
- bitC->bitPos += nbBits;
-}
-
-/*! BIT_addBitsFast() :
- * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
-ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
-{
- bitC->bitContainer |= value << bitC->bitPos;
- bitC->bitPos += nbBits;
-}
-
-/*! BIT_flushBitsFast() :
- * unsafe version; does not check buffer overflow */
-ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
-{
- size_t const nbBytes = bitC->bitPos >> 3;
- ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
- bitC->ptr += nbBytes;
- bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
-}
-
-/*! BIT_flushBits() :
- * safe version; check for buffer overflow, and prevents it.
- * note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
-ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
-{
- size_t const nbBytes = bitC->bitPos >> 3;
- ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
- bitC->ptr += nbBytes;
- if (bitC->ptr > bitC->endPtr)
- bitC->ptr = bitC->endPtr;
- bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
-}
-
-/*! BIT_closeCStream() :
- * @return : size of CStream, in bytes,
- or 0 if it could not fit into dstBuffer */
-ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
-{
- BIT_addBitsFast(bitC, 1, 1); /* endMark */
- BIT_flushBits(bitC);
-
- if (bitC->ptr >= bitC->endPtr)
- return 0; /* doesn't fit within authorized budget : cancel */
-
- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
-}
-
-/*-********************************************************
-* bitStream decoding
-**********************************************************/
-/*! BIT_initDStream() :
-* Initialize a BIT_DStream_t.
-* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
-* `srcSize` must be the *exact* size of the bitStream, in bytes.
-* @return : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
-{
- if (srcSize < 1) {
- memset(bitD, 0, sizeof(*bitD));
- return ERROR(srcSize_wrong);
- }
-
- if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
- bitD->start = (const char *)srcBuffer;
- bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
- {
- BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
- if (lastByte == 0)
- return ERROR(GENERIC); /* endMark not present */
- }
- } else {
- bitD->start = (const char *)srcBuffer;
- bitD->ptr = bitD->start;
- bitD->bitContainer = *(const BYTE *)(bitD->start);
- switch (srcSize) {
- case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
- /* fall through */
- case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
- /* fall through */
- case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
- /* fall through */
- case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
- /* fall through */
- case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
- /* fall through */
- case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
- default:;
- }
- {
- BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
- if (lastByte == 0)
- return ERROR(GENERIC); /* endMark not present */
- }
- bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
- }
-
- return srcSize;
-}
-
-ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
-
-ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
-
-ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
-
-/*! BIT_lookBits() :
- * Provides next n bits from local register.
- * local register is not modified.
- * On 32-bits, maxNbBits==24.
- * On 64-bits, maxNbBits==56.
- * @return : value extracted
- */
-ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
-{
- U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
- return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
-}
-
-/*! BIT_lookBitsFast() :
-* unsafe version; only works only if nbBits >= 1 */
-ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
-{
- U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
- return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
-}
-
-ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
-
-/*! BIT_readBits() :
- * Read (consume) next n bits from local register and update.
- * Pay attention to not read more than nbBits contained into local register.
- * @return : extracted value.
- */
-ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
-{
- size_t const value = BIT_lookBits(bitD, nbBits);
- BIT_skipBits(bitD, nbBits);
- return value;
-}
-
-/*! BIT_readBitsFast() :
-* unsafe version; only works only if nbBits >= 1 */
-ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
-{
- size_t const value = BIT_lookBitsFast(bitD, nbBits);
- BIT_skipBits(bitD, nbBits);
- return value;
-}
-
-/*! BIT_reloadDStream() :
-* Refill `bitD` from buffer previously set in BIT_initDStream() .
-* This function is safe, it guarantees it will not read beyond src buffer.
-* @return : status of `BIT_DStream_t` internal register.
- if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
-ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
-{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
- return BIT_DStream_overflow;
-
- if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
- bitD->ptr -= bitD->bitsConsumed >> 3;
- bitD->bitsConsumed &= 7;
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
- return BIT_DStream_unfinished;
- }
- if (bitD->ptr == bitD->start) {
- if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
- return BIT_DStream_endOfBuffer;
- return BIT_DStream_completed;
- }
- {
- U32 nbBytes = bitD->bitsConsumed >> 3;
- BIT_DStream_status result = BIT_DStream_unfinished;
- if (bitD->ptr - nbBytes < bitD->start) {
- nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
- result = BIT_DStream_endOfBuffer;
- }
- bitD->ptr -= nbBytes;
- bitD->bitsConsumed -= nbBytes * 8;
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
- return result;
- }
-}
-
-/*! BIT_endOfDStream() :
-* @return Tells if DStream has exactly reached its end (all bits consumed).
-*/
-ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
-{
- return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
-}
-
-#endif /* BITSTREAM_H_MODULE */
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
new file mode 100644
index 000000000000..feef3a1b1d60
--- /dev/null
+++ b/lib/zstd/common/bitstream.h
@@ -0,0 +1,446 @@
+/* ******************************************************************
+ * bitstream
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined into a .h to be included.
+*/
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include "mem.h" /* unaligned access routines */
+#include "compiler.h" /* UNLIKELY() */
+#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
+#include "error_private.h" /* error codes and messages */
+
+
+/*=========================================
+* Target specific
+=========================================*/
+
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+* bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct {
+ size_t bitContainer;
+ unsigned bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+* bitStream will never write outside of this buffer.
+* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+* bits are first added to a local register.
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Writing data into memory is an explicit operation, performed by the flushBits function.
+* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
+* After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+* Last operation is to close the bitStream.
+* The function returns the final size of CStream in bytes.
+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+* A chunk of the bitStream is then stored into a local register.
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* You can then retrieve bitFields stored into the local register, **in reverse order**.
+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+* Otherwise, it can be less than that, so proceed accordingly.
+* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
+
+/*===== Local Constants =====*/
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+* bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ * `dstCapacity` must be > sizeof(size_t)
+ * @return : 0 if success,
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ bitC->bitContainer = 0;
+ bitC->bitPos = 0;
+ bitC->startPtr = (char*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! BIT_addBits() :
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ * works only if `value` is _clean_,
+ * meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= value << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ * assumption : bitContainer has not overflowed
+ * unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ * assumption : bitContainer has not overflowed
+ * safe version; check for buffer overflow, and prevents it.
+ * note : does not signal buffer overflow.
+ * overflow will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ * @return : size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+ BIT_addBitsFast(bitC, 1, 1); /* endMark */
+ BIT_flushBits(bitC);
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
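+/* Usage sketch (illustrative only, not called by the library) :
+ * write a couple of fixed-width fields with the CStream API above.
+ * The field widths and the 64-byte `dst` buffer are arbitrary choices made
+ * for this example; real callers derive `dstCapacity` from their own
+ * worst-case bound.
+ *
+ *     BYTE dst[64];
+ *     BIT_CStream_t bitC;
+ *     size_t streamSize;
+ *     if (ERR_isError(BIT_initCStream(&bitC, dst, sizeof(dst)))) return;  // dstCapacity too small
+ *     BIT_addBits(&bitC, 5, 3);     // value 5, stored on 3 bits
+ *     BIT_addBits(&bitC, 300, 9);   // value 300, stored on 9 bits
+ *     BIT_flushBits(&bitC);         // commit accumulated bits into dst
+ *     streamSize = BIT_closeCStream(&bitC);   // 0 means dst was too small
+ */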
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ ZSTD_FALLTHROUGH;
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ ZSTD_FALLTHROUGH;
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ ZSTD_FALLTHROUGH;
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ ZSTD_FALLTHROUGH;
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ ZSTD_FALLTHROUGH;
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ ZSTD_FALLTHROUGH;
+
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+ return bitContainer >> start;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+ U32 const regMask = sizeof(bitContainer)*8 - 1;
+ /* if start > regMask, bitstream is corrupted, and result is undefined */
+ assert(nbBits < BIT_MASK_SIZE);
+    /* On x86, the compiler transforms `& ((1 << nbBits) - 1)` into a bzhi
+     * instruction, which is better than accessing memory. When the bmi2
+     * instruction set is not present, we consider such cpus old (pre-Haswell,
+     * 2013) and their performance is not of primary importance.
+ */
+#if defined(__x86_64__) || defined(_M_X86)
+ return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
+#else
+ return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
+#endif
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ * Provides next n bits from local register.
+ * local register is not modified.
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ /* arbitrate between double-shift and shift+mask */
+#if 1
+ /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
+ * bitstream is likely corrupted, and result is undefined */
+ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+ /* this code path is slower on my os-x laptop */
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ * Read (consume) next n bits from local register and update.
+ * Pay attention not to read more bits than the local register contains.
+ * @return : extracted value. */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_readBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ assert(nbBits >= 1);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_reloadDStreamFast() :
+ * Similar to BIT_reloadDStream(), but with two differences:
+ * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
+ * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr; at that
+ *    point you must use BIT_reloadDStream() to reload.
+ */
+MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
+{
+ if (UNLIKELY(bitD->ptr < bitD->limitPtr))
+ return BIT_DStream_overflow;
+ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+}
+
+/*! BIT_reloadDStream() :
+ * Refill `bitD` from the buffer previously set in BIT_initDStream().
+ * This function is safe, it guarantees it will not read beyond src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->limitPtr) {
+ return BIT_reloadDStreamFast(bitD);
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ /* start < ptr < limitPtr */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
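+/* Usage sketch (illustrative only, not called by the library) :
+ * read back the stream produced by the encoding sketch near the top of this
+ * file. Fields come out in reverse order of writing, because the bitstream
+ * is written forward but decoded backward.
+ *
+ *     BIT_DStream_t bitD;
+ *     if (ERR_isError(BIT_initDStream(&bitD, dst, streamSize))) return;
+ *     {   size_t const second = BIT_readBits(&bitD, 9);   // == 300
+ *         size_t const first  = BIT_readBits(&bitD, 3);   // == 5
+ *         BIT_reloadDStream(&bitD);        // returns BIT_DStream_completed here
+ *         assert(BIT_endOfDStream(&bitD)); // every bit has been consumed
+ *         (void)first; (void)second;
+ *     }
+ */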
+
+#endif /* BITSTREAM_H_MODULE */
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
new file mode 100644
index 000000000000..c42d39faf9bd
--- /dev/null
+++ b/lib/zstd/common/compiler.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+#include "portability_macros.h"
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
+
+#if !defined(ZSTD_NO_INLINE)
+#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#define FORCE_INLINE_ATTR __attribute__((always_inline))
+
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
+
+/*
+  On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
+ This explicitly marks such functions as __cdecl so that the code will still compile
+ if a CC other than __cdecl has been made the default.
+*/
+#define WIN_CDECL
+
+/*
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+/*
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compilers
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#define UNUSED_ATTR __attribute__((unused))
+
+/* force no inlining */
+#define FORCE_NOINLINE static __attribute__((__noinline__))
+
+
+/* target attribute */
+#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+
+/* Target attribute for BMI2 dynamic dispatch.
+ * Enable lzcnt, bmi, and bmi2.
+ * We test for bmi1 & bmi2. lzcnt is included in bmi1.
+ */
+#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
+
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH build macro */
+#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#elif defined(__aarch64__)
+# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
+#else
+# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+#endif /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s) { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+}
+
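+/* Usage sketch (illustrative only) : prefetch a whole buffer, one cache line
+ * at a time, before a tight loop that will walk it. `dictBase` and `dictSize`
+ * are hypothetical names used just for this example.
+ *
+ *     PREFETCH_AREA(dictBase, dictSize);
+ */
+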
+/* vectorization
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
+ * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
+#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
+# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
+# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+# else
+# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
+# endif
+#else
+# define DONT_VECTORIZE
+#endif
+
+/* Tell the compiler that a branch is likely or unlikely.
+ * Only use these macros if it causes the compiler to generate better code.
+ * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
+ * and clang, please do.
+ */
+#define LIKELY(x) (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+
+#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
+# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
+#else
+# define ZSTD_UNREACHABLE { assert(0); }
+#endif
+
+/* disable warnings */
+
+/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
+
+
+/* compile time determination of SIMD support */
+
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
+# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define ZSTD_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+/* Only use C++ attributes in C++. Some compilers report support for C++
+ * attributes when compiling with C.
+ */
+#define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
+
+/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
+ * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
+ * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * - Else: __attribute__((__fallthrough__))
+ */
+#define ZSTD_FALLTHROUGH fallthrough
+
+/*-**************************************************************
+* Alignment check
+*****************************************************************/
+
+/* this test was initially positioned in mem.h,
+ * but that file is removed (or replaced) for the linux kernel,
+ * so it's now hosted in compiler.h,
+ * which remains valid for both user & kernel spaces.
+ */
+
+#ifndef ZSTD_ALIGNOF
+/* covers gcc, clang & MSVC */
+/* note : this section must come first, before C11,
+ * due to a limitation in the kernel source generator */
+# define ZSTD_ALIGNOF(T) __alignof(T)
+
+#endif /* ZSTD_ALIGNOF */
+
+/*-**************************************************************
+* Sanitizer
+*****************************************************************/
+
+
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
new file mode 100644
index 000000000000..0db7b42407ee
--- /dev/null
+++ b/lib/zstd/common/cpu.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/*
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include "mem.h"
+
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
+#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+    /* The following block is like the normal cpuid branch below, but gcc
+     * reserves ebx for use as its PIC register, so we must specially
+     * handle the save and restore to avoid clobbering that register.
+ */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ : "a"(1));
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %%eax\n\t"
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
+
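+/* Usage sketch (illustrative only) :
+ * each X(name, reg, bit) expansion above defines a tiny predicate such as
+ * ZSTD_cpuid_bmi1() or ZSTD_cpuid_avx2(), all tested against one cached cpuid :
+ *
+ *     ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
+ *     if (ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid)) {
+ *         // safe to dispatch to a BMI2-accelerated code path
+ *     }
+ */
+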
+#endif /* ZSTD_COMMON_CPU_H */
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
new file mode 100644
index 000000000000..bb863c9ea616
--- /dev/null
+++ b/lib/zstd/common/debug.c
@@ -0,0 +1,24 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
new file mode 100644
index 000000000000..6dd88d1fbd02
--- /dev/null
+++ b/lib/zstd/common/debug.h
@@ -0,0 +1,101 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2,
+ * and is a global variable, not multi-thread protected (use with care)
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact.
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In that case, it's possible to selectively trigger high verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+# define ZSTD_DEPS_NEED_ASSERT
+# include "zstd_deps.h"
+#else
+# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
+# define assert(condition) ((void)0) /* disable assert (default) */
+# endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+# define ZSTD_DEPS_NEED_IO
+# include "zstd_deps.h"
+extern int g_debuglevel; /* the variable is only declared,
+ it actually lives in debug.c,
+ and is shared by the whole process.
+ It's not thread-safe.
+ It's useful when enabling very verbose levels
+ on selective conditions (such as position in src) */
+
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+ } }
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
+ } }
+#else
+# define RAWLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
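+/* Usage sketch (illustrative only) :
+ * `blockSize` and `maxBlockSize` are hypothetical locals used for the example.
+ * With DEBUGLEVEL >= 5 the trace below is printed once per block;
+ * at the default DEBUGLEVEL==0, both lines compile to nothing.
+ *
+ *     assert(blockSize <= maxBlockSize);   // active when DEBUGLEVEL >= 1
+ *     DEBUGLOG(5, "decoding block of %u bytes", (unsigned)blockSize);
+ */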
+
+
+#endif /* DEBUG_H_12987983217 */
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
new file mode 100644
index 000000000000..fef67056f052
--- /dev/null
+++ b/lib/zstd/common/entropy_common.c
@@ -0,0 +1,357 @@
+/* ******************************************************************
+ * Common functions of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* *************************************
+* Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h" /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static U32 FSE_ctz(U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_ctz(val);
+# else /* Software version */
+ U32 count = 0;
+ while ((val & 1) == 0) {
+ val >>= 1;
+ ++count;
+ }
+ return count;
+# endif
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ unsigned const maxSV1 = *maxSVPtr + 1;
+ int previous0 = 0;
+
+ if (hbSize < 8) {
+ /* This function only works when hbSize >= 8 */
+ char buffer[8] = {0};
+ ZSTD_memcpy(buffer, headerBuffer, hbSize);
+ { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+ buffer, sizeof(buffer));
+ if (FSE_isError(countSize)) return countSize;
+ if (countSize > hbSize) return ERROR(corruption_detected);
+ return countSize;
+ } }
+ assert(hbSize >= 8);
+
+ /* init */
+ ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ for (;;) {
+ if (previous0) {
+ /* Count the number of repeats. Each time the
+ * 2-bit repeat code is 0b11 there is another
+ * repeat.
+ * Avoid UB by setting the high bit to 1.
+ */
+ int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ while (repeats >= 12) {
+ charnum += 3 * 12;
+ if (LIKELY(ip <= iend-7)) {
+ ip += 3;
+ } else {
+ bitCount -= (int)(8 * (iend - 7 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ }
+ charnum += 3 * repeats;
+ bitStream >>= 2 * repeats;
+ bitCount += 2 * repeats;
+
+ /* Add the final repeat which isn't 0b11. */
+ assert((bitStream & 3) < 3);
+ charnum += bitStream & 3;
+ bitCount += 2;
+
+ /* This is an error, but break and return an error
+ * at the end, because returning out of a loop makes
+ * it harder for the compiler to optimize.
+ */
+ if (charnum >= maxSV1) break;
+
+ /* We don't need to set the normalized count to 0
+ * because we already memset the whole buffer to 0.
+ */
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ assert((bitCount >> 3) <= 3); /* For first condition to work */
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ {
+ int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
+ } else {
+ count = bitStream & (2*threshold-1);
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ /* When it matters (small blocks), this is a
+ * predictable branch, because we don't use -1.
+ */
+ if (count >= 0) {
+ remaining -= count;
+ } else {
+ assert(count == -1);
+ remaining += count;
+ }
+ normalizedCounter[charnum++] = (short)count;
+ previous0 = !count;
+
+ assert(threshold > 1);
+ if (remaining < threshold) {
+ /* This branch can be folded into the
+ * threshold update condition because we
+ * know that threshold > 1.
+ */
+ if (remaining <= 1) break;
+ nbBits = BIT_highbit32(remaining) + 1;
+ threshold = 1 << (nbBits - 1);
+ }
+ if (charnum >= maxSV1) break;
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } }
+ if (remaining != 1) return ERROR(corruption_detected);
+ /* Only possible when there are too many zeros. */
+ if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
+ if (bitCount > 32) return ERROR(corruption_detected);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ return ip-istart;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_readNCount_body_default(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+#endif
+
+size_t FSE_readNCount_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+size_t FSE_readNCount(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
+}
+
+
+/*! HUF_readStats() :
+ Read compact Huffman tree, saved by HUF_writeCTable().
+ `huffWeight` is destination buffer.
+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+ @return : size read from `src` , or an error Code .
+ Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+    /* ZSTD_memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSE (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ /* max (hwSize-1) values decoded, as last one is implied */
+ oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+}
diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
new file mode 100644
index 000000000000..6d1135f8c373
--- /dev/null
+++ b/lib/zstd/common/error_private.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+#ifdef ZSTD_STRIP_ERROR_STRINGS
+ (void)code;
+ return "Error strings stripped";
+#else
+ static const char* const notErrorCode = "Unspecified error code";
+ switch( code )
+ {
+ case PREFIX(no_error): return "No error detected";
+ case PREFIX(GENERIC): return "Error (generic)";
+ case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+ case PREFIX(version_unsupported): return "Version not supported";
+ case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+ case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(init_missing): return "Context should be init first";
+ case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+ case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+ case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+ case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
+ case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
+ case PREFIX(maxCode):
+ default: return notErrorCode;
+ }
+#endif
+}
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
new file mode 100644
index 000000000000..ca5101e542fa
--- /dev/null
+++ b/lib/zstd/common/error_private.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+
+
+/* ****************************************
+* Dependencies
+******************************************/
+#include <linux/zstd_errors.h> /* enum list */
+#include "compiler.h"
+#include "debug.h"
+#include "zstd_deps.h" /* size_t */
+
+
+/* ****************************************
+* Compiler-specific
+******************************************/
+#define ERR_STATIC static __attribute__((unused))
+
+
+/*-****************************************
+* Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+* Error codes handling
+******************************************/
+#undef ERROR /* already defined on Visual Studio */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+/* check and forward error code */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+
+/*-****************************************
+* Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code); /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ return ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * This is a helper function to help force C99-correctness during compilation.
+ * Under strict compilation modes, variadic macro arguments can't be empty.
+ * However, variadic function arguments can be. Using a function therefore lets
+ * us statically check that at least one (string) argument was passed,
+ * independent of the compilation flags.
+ */
+static INLINE_KEYWORD UNUSED_ATTR
+void _force_has_format_string(const char *format, ...) {
+ (void)format;
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+#define _FORCE_HAS_FORMAT_STRING(...) \
+ if (0) { \
+ _force_has_format_string(__VA_ARGS__); \
+ }
+
+#define ERR_QUOTE(str) #str
+
+/*
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+/*
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0);
+
+/*
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0);
+
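+/* Usage sketch (illustrative only) :
+ * error codes are negated ZSTD_ErrorCode values cast to size_t, so they sit at
+ * the very top of the size_t range and can never collide with a valid size.
+ * `dstCapacity`, `neededSize` and doOneStep() are hypothetical stand-ins.
+ *
+ *     size_t example(size_t dstCapacity, size_t neededSize)
+ *     {
+ *         RETURN_ERROR_IF(dstCapacity < neededSize, dstSize_tooSmall,
+ *                         "need %u bytes", (unsigned)neededSize);
+ *         FORWARD_IF_ERROR(doOneStep(), "intermediate step failed");
+ *         return neededSize;   // callers test the result with ERR_isError()
+ *     }
+ */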
+
+#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
new file mode 100644
index 000000000000..4507043b2287
--- /dev/null
+++ b/lib/zstd/common/fse.h
@@ -0,0 +1,711 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy codec
+ * Public Prototypes declaration
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+* Dependencies
+******************************************/
+#include "zstd_deps.h" /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required, but it allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSE_compress() :
+ Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+ @return : size of compressed data (<= dstCapacity).
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
+ if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= maxDstSize),
+ or an error code, which can be tested using FSE_isError() .
+
+ ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+* FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+ Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+ Both parameters can be defined as '0' to mean : use default value
+ @return : size of compressed data
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
+ if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[] (see hist.h)
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_optimalTableLog():
+ dynamically downsize 'tableLog' when conditions are met.
+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ useLowProbCount is a boolean parameter which trades off compressed size for
+ faster header decoding. When it is set to 1, the compressed data will be slightly
+ smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
+ faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
+ is a good default, since header deserialization makes a big speed difference.
+ Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
+ @return : tableLog,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
+
+/*! FSE_NCountWriteBound():
+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+ Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+ Compactly save 'normalizedCounter' into 'buffer'.
+ @return : size of the compressed table,
+ or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+ Builds `ct`, which must be already allocated, using FSE_createCTable().
+ @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+ Compress `src` using `ct` into `dst` which must be already allocated.
+ @return : size of compressed data (<= `dstCapacity`),
+ or 0 if compressed data could not fit into `dst`,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
+
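+/*
+Compression sketch (illustrative only ; error handling elided for brevity,
+every return value must normally be tested with FSE_isError()).
+`src`, `srcSize`, `dst` (a byte buffer), `dstCapacity` and `cSize` are
+hypothetical caller variables, and `count[]` is assumed to have been filled
+by the histogram step of hist.h :
+
+    short    norm[256];
+    unsigned maxSymbolValue = 255;   // updated by the histogram step
+    unsigned tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+    FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, 1);
+    {   size_t const hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
+        FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
+        FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
+        cSize = hSize + FSE_compress_usingCTable(dst + hSize, dstCapacity - hSize, src, srcSize, ct);
+        FSE_freeCTable(ct);
+    }
+*/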
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSE_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize);
+
+/*! FSE_readNCount_bmi2():
+ * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
+ */
+FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize, int bmi2);
+
+/*! Constructor and Destructor of FSE_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+ Builds 'dt', which must be already allocated, using FSE_createDTable().
+ return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
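+
+/*!
+Example (illustrative sketch) :
+the decompression steps above put together, using only declarations from this section.
+Error handling is minimal; the fixed-size counter table (worst case : 256 symbols),
+the (size_t)-1 error value and the function name are illustrative choices.
+
+    static size_t example_decompress_usingDTable(void* dst, size_t dstCapacity,
+                                                 const void* cSrc, size_t cSrcSize)
+    {
+        short normalizedCounter[256];
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        size_t result;
+        size_t const headerSize = FSE_readNCount(normalizedCounter, &maxSymbolValue,
+                                                 &tableLog, cSrc, cSrcSize);
+        if (FSE_isError(headerSize)) return headerSize;
+
+        {   FSE_DTable* const dt = FSE_createDTable(tableLog);
+            if (dt == NULL) return (size_t)-1;   // illustrative error value
+            result = FSE_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);
+            if (!FSE_isError(result))
+                result = FSE_decompress_usingDTable(dst, dstCapacity,
+                                                    (const char*)cSrc + headerSize,
+                                                    cSrcSize - headerSize, dt);
+            FSE_freeDTable(dt);
+        }
+        return result;   // regenerated size, or an error code
+    }
+*/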
+
+#endif /* FSE_H */
+
+#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate an FSE CTable/DTable as a table of FSE_CTable/FSE_DTable, using the macros below */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog)))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
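+
+/* Example (illustrative) : static allocation using the macros above,
+ * e.g. for tables able to handle tableLog==12 and maxSymbolValue==255 :
+ *
+ *     FSE_CTable ctable[FSE_CTABLE_SIZE_U32(12, 255)];
+ *     FSE_DTable dtable[FSE_DTABLE_SIZE_U32(12)];
+ *     BYTE cBuffer[FSE_COMPRESSBOUND(1024)];   // worst-case compressed size for a 1 KB block
+ */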
+
+
+/* *****************************************
+ * FSE advanced API
+ ***************************************** */
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/*< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/*< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
+ * See FSE_buildCTable_wksp() for breakdown of workspace usage.
+ */
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
+#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+/*< Same as FSE_buildDTable(), using an externally allocated `workSpace` sized with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/*< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/*< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
+/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
+/*< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
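+
+/* Example (illustrative) : decompression with an externally allocated workspace,
+ * sized with the macros above. FSE_MAX_TABLELOG and FSE_MAX_SYMBOL_VALUE are the
+ * static-linking defaults defined later in this header.
+ *
+ *     unsigned wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+ *     size_t const dSize = FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize,
+ *                                              FSE_MAX_TABLELOG, wksp, sizeof(wksp));
+ *     if (FSE_isError(dSize)) { ... }   // handle error ; otherwise dSize bytes were regenerated
+ */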
+
+typedef enum {
+ FSE_repeat_none, /*< Cannot use the previous table */
+ FSE_repeat_check, /*< Can use the previous table but it must be checked */
+ FSE_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+* FSE symbol compression API
+*******************************************/
+/*!
+ This API consists of small unitary functions, which highly benefit from being inlined.
+ Hence their bodies are included in the next section.
+*/
+typedef struct {
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/*<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable ct; // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream; // bitStream tracking structure
+FSE_CState_t state; // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+ size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders
+ BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory; they just store it in a local register, for speed.
+The local register is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+ BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+    FSE_flushCState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return 0 (== not compressible).
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+ size_t size = BIT_closeCStream(&bitStream);
+*/
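+
+/*!
+Example (illustrative sketch) : a single-state encoding loop following the steps above.
+Since decoding proceeds in reverse, the input is walked backwards here so that it can be
+decoded front-to-back. The function name is illustrative only.
+
+    static size_t example_encode(void* dst, size_t dstCapacity,
+                                 const unsigned char* src, size_t srcSize,
+                                 const FSE_CTable* ct)
+    {
+        BIT_CStream_t bitStream;
+        FSE_CState_t state;
+        size_t n;
+        size_t const initError = BIT_initCStream(&bitStream, dst, dstCapacity);
+        if (FSE_isError(initError)) return initError;   // dstCapacity too small
+
+        FSE_initCState(&state, ct);
+        for (n = srcSize; n > 0; n--) {                 // encode the last symbol first
+            FSE_encodeSymbol(&bitStream, &state, src[n-1]);
+            BIT_flushBits(&bitStream);                  // commit the local register to memory
+        }
+        FSE_flushCState(&bitStream, &state);
+        return BIT_closeCStream(&bitStream);            // 0 => could not fit into dst
+    }
+*/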
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct {
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/*<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream; // Stream context
+FSE_DState_t DState; // State context. Multiple ones are possible
+FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+ errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+ errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+ unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you may have stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bit compatibility
+ size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All the above operations only read from a local register (whose size depends on size_t).
+Refilling the register from memory is manually performed by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells whether there is still more data to read from the DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of the buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, which in general corresponds to a completed decompression.
+BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+ BIT_endOfDStream(&DStream);
+Also check the states. There might be some symbols left there, if some high-probability ones (>50%) are possible.
+ FSE_endOfDState(&DState);
+*/
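+
+/*!
+Example (illustrative sketch) : a single-state decoding loop following the steps above,
+assuming the regenerated size 'dstSize' is known beforehand (as is typical for FSE blocks).
+The function name and the (size_t)-1 error value are illustrative only.
+
+    static size_t example_decode(unsigned char* dst, size_t dstSize,
+                                 const void* cSrc, size_t cSrcSize,
+                                 const FSE_DTable* dt)
+    {
+        BIT_DStream_t DStream;
+        FSE_DState_t DState;
+        size_t n;
+        size_t const initError = BIT_initDStream(&DStream, cSrc, cSrcSize);
+        if (FSE_isError(initError)) return initError;
+
+        FSE_initDState(&DState, &DStream, dt);
+        for (n = 0; n < dstSize; n++) {
+            dst[n] = FSE_decodeSymbol(&DState, &DStream);
+            BIT_reloadDStream(&DStream);                // refill the local register from memory
+        }
+        // verify everything was consumed : bitStream fully read, state back to its end value
+        if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState))
+            return (size_t)-1;                          // corrupted input
+        return dstSize;
+    }
+*/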
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+typedef struct {
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+ const void* ptr = ct;
+ const U16* u16ptr = (const U16*) ptr;
+ const U32 tableLog = MEM_read16(ptr);
+ statePtr->value = (ptrdiff_t)1<<tableLog;
+ statePtr->stateTable = u16ptr+2;
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
+ statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+* uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
+{
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+}
+
+
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional values get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+ U32 const threshold = (minNbBits+1) << 16;
+ assert(tableLog < 16);
+ assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */
+ { U32 const tableSize = 1 << tableLog;
+ U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+ U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */
+ U32 const bitMultiplier = 1 << accuracyLog;
+ assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+ assert(normalizedDeltaFromThreshold <= bitMultiplier);
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+ }
+}
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effects
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+# define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+# define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
+# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+# define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
new file mode 100644
index 000000000000..8dcb8ca39767
--- /dev/null
+++ b/lib/zstd/common/fse_decompress.c
@@ -0,0 +1,390 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy decoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "debug.h" /* assert */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+ ZSTD_free(dt);
+}
+
+static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16* symbolNext = (U16*)workSpace;
+ BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+         * The benefit of doing it in two stages is that we avoid the
+         * variable size inner loop, which caused lots of branch misses.
+         * Now we can run through all the positions without any branch misses.
+         * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].symbol = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+ FSE_DTable dtable[]; /* Dynamically sized */
+} FSE_DecompressWksp;
+
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize,
+ unsigned maxLog, void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+
+ DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+
+ /* normal FSE decoding mode */
+ {
+ size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+ workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+ CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+ const void* ptr = wksp->dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
new file mode 100644
index 000000000000..5042ff870308
--- /dev/null
+++ b/lib/zstd/common/huf.h
@@ -0,0 +1,358 @@
+/* ******************************************************************
+ * huff0 huffman codec,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include "zstd_deps.h" /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/* HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/* HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'originalSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ * because it knows size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
+
+
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+/* HUF_compress2() :
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/* HUF_compress4X_wksp() :
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+# error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+typedef size_t HUF_CElt; /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
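+
+/* Example (illustrative) : static allocation with the macros above.
+ * 'hufCTable' and 'hufDTable' are illustrative names.
+ *
+ *     HUF_CREATE_STATIC_CTABLE(hufCTable, HUF_SYMBOLVALUE_MAX);
+ *     HUF_CREATE_STATIC_DTABLEX2(hufDTable, HUF_TABLELOG_MAX);
+ */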
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+
+/* ****************************************
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
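+
+/* Example (illustrative sketch) : the pipeline described above, using the detailed API.
+ * 'count[]' is assumed to be already filled with symbol occurrences (step 1).
+ * The function name is illustrative only ; error handling is kept minimal.
+ *
+ *     static size_t example_huf_pipeline(void* dst, size_t dstCapacity,
+ *                                        const void* src, size_t srcSize,
+ *                                        const unsigned* count, unsigned maxSymbolValue)
+ *     {
+ *         HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
+ *         unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
+ *         size_t const maxNbBits = HUF_buildCTable(ctable, count, maxSymbolValue, huffLog);
+ *         size_t hSize, cSize;
+ *         if (HUF_isError(maxNbBits)) return maxNbBits;
+ *         hSize = HUF_writeCTable(dst, dstCapacity, ctable, maxSymbolValue, (unsigned)maxNbBits);
+ *         if (HUF_isError(hSize)) return hSize;
+ *         cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize, src, srcSize, ctable);
+ *         if (HUF_isError(cSize) || cSize == 0) return cSize;   // 0 => not compressible
+ *         return hSize + cSize;   // table header + compressed payload
+ *     }
+ */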
+
+typedef enum {
+ HUF_repeat_none, /*< Cannot use the previous table */
+ HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/* HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src` , or an error Code .
+ * @return : size read from `src`, or an error code.
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+/*! HUF_readStats_wksp() :
+ * Same as HUF_readStats() but takes an external workspace which must be
+ * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2);
+
+/* HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
+
+/* HUF_getNbBitsFromCTable() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : is not inlined, as HUF_CElt definition is private */
+U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build Huffman table from saved representation, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+/*
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+/* HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+#endif
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of single or double symbol decoder, based on DTable */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
new file mode 100644
index 000000000000..1d9cc03924ca
--- /dev/null
+++ b/lib/zstd/common/mem.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <asm/unaligned.h> /* get_unaligned, put_unaligned* */
+#include <linux/compiler.h> /* inline */
+#include <linux/swab.h> /* swab32, swab64 */
+#include <linux/types.h> /* size_t, ptrdiff_t */
+#include "debug.h" /* DEBUG_STATIC_ASSERT */
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#define MEM_STATIC static inline
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+typedef uint8_t BYTE;
+typedef uint8_t U8;
+typedef int8_t S8;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef int64_t S64;
+
+/*-**************************************************************
+* Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
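+
+/*
+ * Usage sketch (illustrative only): the helpers declared above wrap the
+ * kernel's get_unaligned_*()/put_unaligned_*() accessors, so callers never
+ * need to care about pointer alignment, e.g.:
+ *   U32 v = MEM_readLE32(src);   reads 4 little-endian bytes
+ *   MEM_writeLE32(dst, v);       writes them back, unaligned-safe
+ * The *ST variants operate on size_t values and select the 32- or 64-bit
+ * path via MEM_32bits()/MEM_64bits(), which the compiler folds to a constant.
+ */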
+
+/*-**************************************************************
+* Memory I/O Implementation
+*****************************************************************/
+MEM_STATIC unsigned MEM_32bits(void)
+{
+ return sizeof(size_t) == 4;
+}
+
+MEM_STATIC unsigned MEM_64bits(void)
+{
+ return sizeof(size_t) == 8;
+}
+
+#if defined(__LITTLE_ENDIAN)
+#define MEM_LITTLE_ENDIAN 1
+#else
+#define MEM_LITTLE_ENDIAN 0
+#endif
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ return MEM_LITTLE_ENDIAN;
+}
+
+MEM_STATIC U16 MEM_read16(const void *memPtr)
+{
+ return get_unaligned((const U16 *)memPtr);
+}
+
+MEM_STATIC U32 MEM_read32(const void *memPtr)
+{
+ return get_unaligned((const U32 *)memPtr);
+}
+
+MEM_STATIC U64 MEM_read64(const void *memPtr)
+{
+ return get_unaligned((const U64 *)memPtr);
+}
+
+MEM_STATIC size_t MEM_readST(const void *memPtr)
+{
+ return get_unaligned((const size_t *)memPtr);
+}
+
+MEM_STATIC void MEM_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+MEM_STATIC void MEM_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+MEM_STATIC void MEM_write64(void *memPtr, U64 value)
+{
+ put_unaligned(value, (U64 *)memPtr);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
+{
+ put_unaligned_le16(val, memPtr);
+}
+
+MEM_STATIC U32 MEM_readLE24(const void *memPtr)
+{
+ return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
+{
+ MEM_writeLE16(memPtr, (U16)val);
+ ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
+}
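+
+/*
+ * Worked example (illustrative): for the byte sequence {0x01, 0x02, 0x03},
+ * MEM_readLE16() yields 0x0201 and the third byte contributes 0x03 << 16,
+ * so MEM_readLE24() returns 0x030201.
+ */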
+
+MEM_STATIC U32 MEM_readLE32(const void *memPtr)
+{
+ return get_unaligned_le32(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
+{
+ put_unaligned_le32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readLE64(const void *memPtr)
+{
+ return get_unaligned_le64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
+{
+ put_unaligned_le64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readLEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeLE32(memPtr, (U32)val);
+ else
+ MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void *memPtr)
+{
+ return get_unaligned_be32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
+{
+ put_unaligned_be32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void *memPtr)
+{
+ return get_unaligned_be64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
+{
+ put_unaligned_be64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readBE32(memPtr);
+ else
+ return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeBE32(memPtr, (U32)val);
+ else
+ MEM_writeBE64(memPtr, (U64)val);
+}
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+ return swab32(in);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+ return swab64(in);
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_swap32((U32)in);
+ else
+ return (size_t)MEM_swap64((U64)in);
+}
+
+#endif /* MEM_H_MODULE */
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
new file mode 100644
index 000000000000..0e3b2c0a527d
--- /dev/null
+++ b/lib/zstd/common/portability_macros.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PORTABILITY_MACROS_H
+#define ZSTD_PORTABILITY_MACROS_H
+
+/*
+ * This header file contains macro definitions to support portability.
+ * This header is shared between C and ASM code, so it MUST only
+ * contain macro definitions. It MUST not contain any C code.
+ *
+ * This header ONLY defines macros to detect platforms/feature support.
+ *
+ */
+
+
+/* compat. with non-clang compilers */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+/* detects whether we are being compiled under msan */
+
+/* detects whether we are being compiled under asan */
+
+/* detects whether we are being compiled under dfsan */
+
+/* Mark the internal assembly functions as hidden */
+#ifdef __ELF__
+# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#else
+# define ZSTD_HIDE_ASM_FUNCTION(func)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >= 4.8 on x86-64 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+ #if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__x86_64__) || defined(_M_X64)) \
+ && !defined(__BMI2__)
+ # define DYNAMIC_BMI2 1
+ #else
+ # define DYNAMIC_BMI2 0
+ #endif
+#endif
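+
+/*
+ * Consumption sketch (an assumption, not defined in this header): code
+ * guarded by DYNAMIC_BMI2 typically builds a second copy of a hot function
+ * with __attribute__((__target__("bmi2"))) and selects between the two at
+ * run time via a CPUID check (see ZSTD_cpuSupportsBmi2() in zstd_internal.h).
+ */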
+
+/*
+ * Only enable assembly for GNUC-compatible compilers,
+ * because other compilers may not support GAS assembly syntax.
+ *
+ * Only enable assembly for Linux / MacOS; other platforms may
+ * work, but they haven't been tested. This could likely be
+ * extended to BSD systems.
+ *
+ * Disable assembly when MSAN is enabled, because MSAN requires
+ * 100% of code to be instrumented to work.
+ */
+#define ZSTD_ASM_SUPPORTED 1
+
+/*
+ * Determines whether we should enable assembly for x86-64
+ * with BMI2.
+ *
+ * Enable if all of the following conditions hold:
+ * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
+ * - Assembly is supported
+ * - We are compiling for x86-64 and either:
+ * - DYNAMIC_BMI2 is enabled
+ * - BMI2 is supported at compile time
+ */
+#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
+
+#endif /* ZSTD_PORTABILITY_MACROS_H */
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
new file mode 100644
index 000000000000..3d7e35b309b5
--- /dev/null
+++ b/lib/zstd/common/zstd_common.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+* Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+#undef ZSTD_isError /* defined within zstd_internal.h */
+/*! ZSTD_isError() :
+ * tells whether a return value is an error code;
+ * the symbol is required for external callers */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ * provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getError() :
+ * convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+
+
+
+/*=**************************************************************
+* Custom allocator
+****************************************************************/
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return ZSTD_malloc(size);
+}
+
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ ZSTD_memset(ptr, 0, size);
+ return ptr;
+ }
+ return ZSTD_calloc(1, size);
+}
+
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ ZSTD_free(ptr);
+ }
+}
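+
+/*
+ * Caller-side sketch (hypothetical allocator, for illustration only):
+ *
+ *   static void *my_alloc(void *opaque, size_t size) { return kvmalloc(size, GFP_KERNEL); }
+ *   static void my_free(void *opaque, void *ptr) { kvfree(ptr); }
+ *
+ *   ZSTD_customMem mem = { my_alloc, my_free, NULL };
+ *   void *buf = ZSTD_customMalloc(4096, mem);   uses my_alloc
+ *   ZSTD_customFree(buf, mem);                  uses my_free
+ *
+ * With an all-zero ZSTD_customMem, the calls fall back to ZSTD_malloc()/
+ * ZSTD_free(), which this kernel port defines as always failing / a no-op
+ * (see zstd_deps.h).
+ */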
diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
new file mode 100644
index 000000000000..2c34e8a33a1c
--- /dev/null
+++ b/lib/zstd/common/zstd_deps.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file provides common libc dependencies that zstd requires.
+ * The purpose is to allow replacing this file with a custom implementation
+ * to compile zstd without libc support.
+ */
+
+/* Need:
+ * NULL
+ * INT_MAX
+ * UINT_MAX
+ * ZSTD_memcpy()
+ * ZSTD_memset()
+ * ZSTD_memmove()
+ */
+#ifndef ZSTD_DEPS_COMMON
+#define ZSTD_DEPS_COMMON
+
+#include <linux/limits.h>
+#include <linux/stddef.h>
+
+#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
+#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
+#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
+
+#endif /* ZSTD_DEPS_COMMON */
+
+/*
+ * Define malloc as always failing. That means the user must
+ * either use ZSTD_customMem or statically allocate memory.
+ * Need:
+ * ZSTD_malloc()
+ * ZSTD_free()
+ * ZSTD_calloc()
+ */
+#ifdef ZSTD_DEPS_NEED_MALLOC
+#ifndef ZSTD_DEPS_MALLOC
+#define ZSTD_DEPS_MALLOC
+
+#define ZSTD_malloc(s) ({ (void)(s); NULL; })
+#define ZSTD_free(p) ((void)(p))
+#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
+
+#endif /* ZSTD_DEPS_MALLOC */
+#endif /* ZSTD_DEPS_NEED_MALLOC */
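+
+/*
+ * Consequence (descriptive note): with malloc stubbed out as above,
+ * ZSTD_customMalloc()/ZSTD_customCalloc() in zstd_common.c return NULL
+ * unless the caller supplies a customAlloc callback in ZSTD_customMem,
+ * so kernel users must either pass a custom allocator or statically
+ * allocate the required workspace.
+ */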
+
+/*
+ * Provides 64-bit math support.
+ * Need:
+ * U64 ZSTD_div64(U64 dividend, U32 divisor)
+ */
+#ifdef ZSTD_DEPS_NEED_MATH64
+#ifndef ZSTD_DEPS_MATH64
+#define ZSTD_DEPS_MATH64
+
+#include <linux/math64.h>
+
+static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
+ return div_u64(dividend, divisor);
+}
+
+#endif /* ZSTD_DEPS_MATH64 */
+#endif /* ZSTD_DEPS_NEED_MATH64 */
+
+/*
+ * This is only requested when DEBUGLEVEL >= 1, meaning
+ * it is disabled in production.
+ * Need:
+ * assert()
+ */
+#ifdef ZSTD_DEPS_NEED_ASSERT
+#ifndef ZSTD_DEPS_ASSERT
+#define ZSTD_DEPS_ASSERT
+
+#include <linux/kernel.h>
+
+#define assert(x) WARN_ON(!(x))
+
+#endif /* ZSTD_DEPS_ASSERT */
+#endif /* ZSTD_DEPS_NEED_ASSERT */
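+
+/*
+ * Note (descriptive): unlike a userspace assert(), WARN_ON() only prints a
+ * warning with a backtrace and lets execution continue, so a failed
+ * assertion in a DEBUGLEVEL build does not halt the kernel.
+ */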
+
+/*
+ * This is only requested when DEBUGLEVEL >= 2, meaning
+ * it is disabled in production.
+ * Need:
+ * ZSTD_DEBUG_PRINT()
+ */
+#ifdef ZSTD_DEPS_NEED_IO
+#ifndef ZSTD_DEPS_IO
+#define ZSTD_DEPS_IO
+
+#include <linux/printk.h>
+
+#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
+
+#endif /* ZSTD_DEPS_IO */
+#endif /* ZSTD_DEPS_NEED_IO */
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
new file mode 100644
index 000000000000..93305d9b41bb
--- /dev/null
+++ b/lib/zstd/common/zstd_internal.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least 2 of them
+ * and which benefit from being inlined */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "compiler.h"
+#include "cpu.h"
+#include "mem.h"
+#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h>
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include <linux/xxhash.h> /* XXH_reset, update, digest */
+#define ZSTD_TRACE 0
+
+
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+
+/*-*************************************
+* shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1<<12)
+
+#define ZSTD_REP_NUM 3 /* number of repcodes */
+static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
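+/* Suffix-style size macros: e.g. (16 KB) expands to (16 *(1 <<10)) == 16384. */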
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */
+static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define ZSTD_FRAMECHECKSUMSIZE 4
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
+/* Each table cannot take more than #symbols * FSELog bits */
+#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
+
+static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16
+};
+static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
+ 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1
+};
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16
+};
+static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
+ 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1
+};
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1
+};
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) {
+#if defined(ZSTD_ARCH_ARM_NEON)
+ vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
+#else
+ ZSTD_memcpy(dst, src, 8);
+#endif
+}
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/* Need to use memmove here since the literal buffer can now be located within
+ the dst buffer. In circumstances where the op "catches up" to where the
+ literal buffer is, there can be partial overlaps in this call on the final
+ copy if the literal is being shifted by less than 16 bytes. */
+static void ZSTD_copy16(void* dst, const void* src) {
+#if defined(ZSTD_ARCH_ARM_NEON)
+ vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
+#elif defined(ZSTD_ARCH_X86_SSE2)
+ _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
+#elif defined(__clang__)
+ ZSTD_memmove(dst, src, 16);
+#else
+ /* ZSTD_memmove is not inlined properly by gcc */
+ BYTE copy16_buf[16];
+ ZSTD_memcpy(copy16_buf, src, 16);
+ ZSTD_memcpy(dst, copy16_buf, 16);
+#endif
+}
+#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+
+#define WILDCOPY_OVERLENGTH 32
+#define WILDCOPY_VECLEN 16
+
+typedef enum {
+ ZSTD_no_overlap,
+ ZSTD_overlap_src_before_dst
+ /* ZSTD_overlap_dst_before_src, */
+} ZSTD_overlap_e;
+
+/*! ZSTD_wildcopy() :
+ * Custom version of ZSTD_memcpy(), which may over-read/over-write up to WILDCOPY_OVERLENGTH bytes beyond `length` (even if length==0)
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
+ * The src buffer must be before the dst buffer.
+ */
+MEM_STATIC FORCE_INLINE_ATTR
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
+{
+ ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+
+ if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+ /* Handle short offset copies. */
+ do {
+ COPY8(op, ip)
+ } while (op < oend);
+ } else {
+ assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
+ /* Separate out the first COPY16() call because the copy length is
+ * almost certain to be short, so the branches have different
+ * probabilities. Since it is almost certain to be short, only do
+ * one COPY16() in the first call. Then, do two calls per loop since
+ * at that point it is more likely to have a high trip count.
+ */
+#ifdef __aarch64__
+ do {
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#else
+ ZSTD_copy16(op, ip);
+ if (16 >= length) return;
+ op += 16;
+ ip += 16;
+ do {
+ COPY16(op, ip);
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#endif
+ }
+}
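+
+/*
+ * Illustrative call-site guidance (restating the contract above): copying a
+ * match from a far offset uses ZSTD_no_overlap, while copying a match whose
+ * source lies fewer than WILDCOPY_VECLEN (but at least 8) bytes before the
+ * destination must use ZSTD_overlap_src_before_dst so the COPY8 path is taken.
+ */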
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ if (length > 0) {
+ ZSTD_memcpy(dst, src, length);
+ }
+ return length;
+}
+
+/* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
+
+/* When the workspace has been too large for at least this many
+ * compressions in a row, the context's memory usage is considered
+ * wasteful, because it is sized for a worst-case scenario that rarely
+ * happens; in that case, resize it down to free some memory */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
+
+/* Controls whether the input/output buffer is buffered or stable. */
+typedef enum {
+ ZSTD_bm_buffered = 0, /* Buffer the input/output */
+ ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
+} ZSTD_bufferMode_e;
+
+
+/*-*******************************************
+* Private declarations
+*********************************************/
+typedef struct seqDef_s {
+ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
+ U16 litLength;
+ U16 mlBase; /* mlBase == matchLength - MINMATCH */
+} seqDef;
+
+/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
+typedef struct {
+ seqDef* sequencesStart;
+ seqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} seqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_sequenceLength;
+
+/*
+ * Returns the ZSTD_sequenceLength for the given sequence. It handles the decoding of long sequences
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
+{
+ ZSTD_sequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = seq->mlBase + MINMATCH;
+ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ seqLen.litLength += 0xFFFF;
+ }
+ if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ seqLen.matchLength += 0xFFFF;
+ }
+ }
+ return seqLen;
+}
+
+/*
+ * Contains the compressed frame size and an upper bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ * Similarly, before using `decompressedBound`, check for errors by verifying that
+ * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`.
+ */
+typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo; /* decompress & legacy */
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
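+/* Example: ZSTD_highbit32(1) == 0 and ZSTD_highbit32(0x80000000) == 31,
+ * i.e. the result is floor(log2(val)). */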
+
+/*
+ * Counts the number of trailing zeros of a `size_t`.
+ * Most compilers should support CTZ as a builtin. A backup
+ * implementation is provided if the builtin isn't supported, but
+ * it may not be terribly efficient.
+ */
+MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
+{
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return __builtin_ctzll((U64)val);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19,
+ 4, 25, 14, 28, 9, 34, 20, 56,
+ 5, 17, 26, 54, 15, 41, 29, 43,
+ 10, 31, 38, 35, 21, 45, 49, 57,
+ 63, 6, 12, 18, 24, 27, 33, 55,
+ 16, 53, 40, 42, 30, 37, 44, 48,
+ 62, 11, 23, 32, 52, 39, 36, 47,
+ 61, 22, 51, 46, 60, 50, 59, 58 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return __builtin_ctz((U32)val);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3,
+ 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7,
+ 26, 12, 18, 6, 11, 5, 10, 9 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+}
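+/* Example: ZSTD_countTrailingZeros(8) == 3. Callers must not pass 0, for
+ * which __builtin_ctz()/__builtin_ctzll() are undefined. */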
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+} blockProperties_t; /* declared here for decompress and fullbench */
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+/*! ZSTD_decodeSeqHeaders() :
+ * decode sequence header from src */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
+/*
+ * @returns true iff the CPU supports dynamic BMI2 dispatch.
+ */
+MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
+{
+ ZSTD_cpuid_t cpuid = ZSTD_cpuid();
+ return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
+}
+
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
deleted file mode 100644
index 5e0b67003e55..000000000000
--- a/lib/zstd/compress.c
+++ /dev/null
@@ -1,3485 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "fse.h"
-#include "huf.h"
-#include "mem.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h> /* memset */
-
-/*-*************************************
-* Constants
-***************************************/
-static const U32 g_searchStrength = 8; /* control skip over incompressible data */
-#define HASH_READ_SIZE 8
-typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
-
-/*-*************************************
-* Helper functions
-***************************************/
-size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
-
-/*-*************************************
-* Sequence storage
-***************************************/
-static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
-{
- ssPtr->lit = ssPtr->litStart;
- ssPtr->sequences = ssPtr->sequencesStart;
- ssPtr->longLengthID = 0;
-}
-
-/*-*************************************
-* Context memory management
-***************************************/
-struct ZSTD_CCtx_s {
- const BYTE *nextSrc; /* next block here to continue on curr prefix */
- const BYTE *base; /* All regular indexes relative to this position */
- const BYTE *dictBase; /* extDict indexes relative to this position */
- U32 dictLimit; /* below that point, need extDict */
- U32 lowLimit; /* below that point, no more data */
- U32 nextToUpdate; /* index from which to continue dictionary update */
- U32 nextToUpdate3; /* index from which to continue dictionary update */
- U32 hashLog3; /* dispatch table : larger == faster, more memory */
- U32 loadedDictEnd; /* index of end of dictionary */
- U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
- U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */
- ZSTD_compressionStage_e stage;
- U32 rep[ZSTD_REP_NUM];
- U32 repToConfirm[ZSTD_REP_NUM];
- U32 dictID;
- ZSTD_parameters params;
- void *workSpace;
- size_t workSpaceSize;
- size_t blockSize;
- U64 frameContentSize;
- struct xxh64_state xxhState;
- ZSTD_customMem customMem;
-
- seqStore_t seqStore; /* sequences storage ptrs */
- U32 *hashTable;
- U32 *hashTable3;
- U32 *chainTable;
- HUF_CElt *hufTable;
- U32 flagStaticTables;
- HUF_repeat flagStaticHufTable;
- FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
- FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
- FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
- unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
-};
-
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
-{
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
- U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11 * maxNbSeq;
- size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
- size_t const hSize = ((size_t)1) << cParams.hashLog;
- U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
- size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- size_t const optSpace =
- ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
- size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
- (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
-
- return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
-}
-
-static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
-{
- ZSTD_CCtx *cctx;
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
- cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
- if (!cctx)
- return NULL;
- memset(cctx, 0, sizeof(ZSTD_CCtx));
- cctx->customMem = customMem;
- return cctx;
-}
-
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
- if (cctx) {
- cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
- }
- return cctx;
-}
-
-size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
-{
- if (cctx == NULL)
- return 0; /* support free on NULL */
- ZSTD_free(cctx->workSpace, cctx->customMem);
- ZSTD_free(cctx, cctx->customMem);
- return 0; /* reserved as a potential error code in the future */
-}
-
-const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
-
-static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
-
-/** ZSTD_checkParams() :
- ensure param values remain within authorized range.
- @return : 0, or an error code if one value is beyond authorized range */
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
-{
-#define CLAMPCHECK(val, min, max) \
- { \
- if ((val < min) | (val > max)) \
- return ERROR(compressionParameter_unsupported); \
- }
- CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
- CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
- CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
- CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
- CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
- if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
- return ERROR(compressionParameter_unsupported);
- return 0;
-}
-
-/** ZSTD_cycleLog() :
- * condition for correct operation : hashLog > 1 */
-static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
-{
- U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
- return hashLog - btScale;
-}
-
-/** ZSTD_adjustCParams() :
- optimize `cPar` for a given input (`srcSize` and `dictSize`).
- mostly downsizing to reduce memory consumption and initialization.
- Both `srcSize` and `dictSize` are optional (use 0 if unknown),
- but if both are 0, no optimization can be done.
- Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
-ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
-{
- if (srcSize + dictSize == 0)
- return cPar; /* no size information available : no adjustment */
-
- /* resize params, to use less memory when necessary */
- {
- U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
- U64 const rSize = srcSize + dictSize + minSrcSize;
- if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
- U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
- if (cPar.windowLog > srcLog)
- cPar.windowLog = srcLog;
- }
- }
- if (cPar.hashLog > cPar.windowLog)
- cPar.hashLog = cPar.windowLog;
- {
- U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
- if (cycleLog > cPar.windowLog)
- cPar.chainLog -= (cycleLog - cPar.windowLog);
- }
-
- if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
- cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
-
- return cPar;
-}
-
-static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
-{
- return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
- (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
-}
-
-/*! ZSTD_continueCCtx() :
- reuse CCtx without reset (note : requires no dictionary) */
-static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
-{
- U32 const end = (U32)(cctx->nextSrc - cctx->base);
- cctx->params = params;
- cctx->frameContentSize = frameContentSize;
- cctx->lowLimit = end;
- cctx->dictLimit = end;
- cctx->nextToUpdate = end + 1;
- cctx->stage = ZSTDcs_init;
- cctx->dictID = 0;
- cctx->loadedDictEnd = 0;
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- cctx->rep[i] = repStartValue[i];
- }
- cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
- xxh64_reset(&cctx->xxhState, 0);
- return 0;
-}
-
-typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
-
-/*! ZSTD_resetCCtx_advanced() :
- note : `params` must be validated */
-static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
-{
- if (crp == ZSTDcrp_continue)
- if (ZSTD_equivalentParams(params, zc->params)) {
- zc->flagStaticTables = 0;
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_continueCCtx(zc, params, frameContentSize);
- }
-
- {
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
- U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11 * maxNbSeq;
- size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
- size_t const hSize = ((size_t)1) << params.cParams.hashLog;
- U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
- size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- void *ptr;
-
- /* Check if workSpace is large enough, alloc a new one if needed */
- {
- size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
- (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
- size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
- (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
- if (zc->workSpaceSize < neededSpace) {
- ZSTD_free(zc->workSpace, zc->customMem);
- zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
- if (zc->workSpace == NULL)
- return ERROR(memory_allocation);
- zc->workSpaceSize = neededSpace;
- }
- }
-
- if (crp != ZSTDcrp_noMemset)
- memset(zc->workSpace, 0, tableSpace); /* reset tables only */
- xxh64_reset(&zc->xxhState, 0);
- zc->hashLog3 = hashLog3;
- zc->hashTable = (U32 *)(zc->workSpace);
- zc->chainTable = zc->hashTable + hSize;
- zc->hashTable3 = zc->chainTable + chainSize;
- ptr = zc->hashTable3 + h3Size;
- zc->hufTable = (HUF_CElt *)ptr;
- zc->flagStaticTables = 0;
- zc->flagStaticHufTable = HUF_repeat_none;
- ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
-
- zc->nextToUpdate = 1;
- zc->nextSrc = NULL;
- zc->base = NULL;
- zc->dictBase = NULL;
- zc->dictLimit = 0;
- zc->lowLimit = 0;
- zc->params = params;
- zc->blockSize = blockSize;
- zc->frameContentSize = frameContentSize;
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- zc->rep[i] = repStartValue[i];
- }
-
- if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
- zc->seqStore.litFreq = (U32 *)ptr;
- zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
- zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
- zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
- ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
- zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
- ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
- zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
- ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
- zc->seqStore.litLengthSum = 0;
- }
- zc->seqStore.sequencesStart = (seqDef *)ptr;
- ptr = zc->seqStore.sequencesStart + maxNbSeq;
- zc->seqStore.llCode = (BYTE *)ptr;
- zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
- zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
- zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-
- zc->stage = ZSTDcs_init;
- zc->dictID = 0;
- zc->loadedDictEnd = 0;
-
- return 0;
- }
-}
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
-{
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- cctx->rep[i] = 0;
-}
-
-/*! ZSTD_copyCCtx() :
-* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
-* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
-* @return : 0, or an error code */
-size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
-{
- if (srcCCtx->stage != ZSTDcs_init)
- return ERROR(stage_wrong);
-
- memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
- {
- ZSTD_parameters params = srcCCtx->params;
- params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
- ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
- }
-
- /* copy tables */
- {
- size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
- size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
- size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
- }
-
- /* copy dictionary offsets */
- dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
- dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
- dstCCtx->nextSrc = srcCCtx->nextSrc;
- dstCCtx->base = srcCCtx->base;
- dstCCtx->dictBase = srcCCtx->dictBase;
- dstCCtx->dictLimit = srcCCtx->dictLimit;
- dstCCtx->lowLimit = srcCCtx->lowLimit;
- dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
- dstCCtx->dictID = srcCCtx->dictID;
-
- /* copy entropy tables */
- dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
- dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
- if (srcCCtx->flagStaticTables) {
- memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
- memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
- memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
- }
- if (srcCCtx->flagStaticHufTable) {
- memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
- }
-
- return 0;
-}
-
-/*! ZSTD_reduceTable() :
-* reduce table indexes by `reducerValue` */
-static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
-{
- U32 u;
- for (u = 0; u < size; u++) {
- if (table[u] < reducerValue)
- table[u] = 0;
- else
- table[u] -= reducerValue;
- }
-}
-
-/*! ZSTD_reduceIndex() :
-* rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
-{
- {
- U32 const hSize = 1 << zc->params.cParams.hashLog;
- ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
- }
-
- {
- U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
- ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
- }
-
- {
- U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
- ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
- }
-}
-
-/*-*******************************************************
-* Block entropic compression
-*********************************************************/
-
-/* See doc/zstd_compression_format.md for detailed format description */
-
-size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
- ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
- return ZSTD_blockHeaderSize + srcSize;
-}
-
-static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- BYTE *const ostart = (BYTE * const)dst;
- U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
-
- if (srcSize + flSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
-
- switch (flSize) {
- case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
- case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
- default: /*note : should not be necessary : flSize is within {1,2,3} */
- case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
- }
-
- memcpy(ostart + flSize, src, srcSize);
- return srcSize + flSize;
-}
-
-static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- BYTE *const ostart = (BYTE * const)dst;
- U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
-
- (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
-
- switch (flSize) {
- case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
- case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
- default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
- case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
- }
-
- ostart[flSize] = *(const BYTE *)src;
- return flSize + 1;
-}
-
-static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
-
-static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const minGain = ZSTD_minGain(srcSize);
- size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
- BYTE *const ostart = (BYTE *)dst;
- U32 singleStream = srcSize < 256;
- symbolEncodingType_e hType = set_compressed;
- size_t cLitSize;
-
-/* small ? don't even attempt compression (speed opt) */
-#define LITERAL_NOENTROPY 63
- {
- size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
- if (srcSize <= minLitSize)
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
-
- if (dstCapacity < lhSize + 1)
- return ERROR(dstSize_tooSmall); /* not enough space for compression */
- {
- HUF_repeat repeat = zc->flagStaticHufTable;
- int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
- if (repeat == HUF_repeat_valid && lhSize == 3)
- singleStream = 1;
- cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
- sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
- : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
- sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
- if (repeat != HUF_repeat_none) {
- hType = set_repeat;
- } /* reused the existing table */
- else {
- zc->flagStaticHufTable = HUF_repeat_check;
- } /* now have a table to reuse */
- }
-
- if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
- if (cLitSize == 1) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
- }
-
- /* Build header */
- switch (lhSize) {
- case 3: /* 2 - 2 - 10 - 10 */
- {
- U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
- ZSTD_writeLE24(ostart, lhc);
- break;
- }
- case 4: /* 2 - 2 - 14 - 14 */
- {
- U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
- ZSTD_writeLE32(ostart, lhc);
- break;
- }
- default: /* should not be necessary, lhSize is only {3,4,5} */
- case 5: /* 2 - 2 - 18 - 18 */
- {
- U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
- ZSTD_writeLE32(ostart, lhc);
- ostart[4] = (BYTE)(cLitSize >> 10);
- break;
- }
- }
- return lhSize + cLitSize;
-}
-
-static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
- 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
- 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
-
-static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
- 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
-
-void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
-{
- BYTE const LL_deltaCode = 19;
- BYTE const ML_deltaCode = 36;
- const seqDef *const sequences = seqStorePtr->sequencesStart;
- BYTE *const llCodeTable = seqStorePtr->llCode;
- BYTE *const ofCodeTable = seqStorePtr->ofCode;
- BYTE *const mlCodeTable = seqStorePtr->mlCode;
- U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- U32 u;
- for (u = 0; u < nbSeq; u++) {
- U32 const llv = sequences[u].litLength;
- U32 const mlv = sequences[u].matchLength;
- llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
- ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
- mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
- }
- if (seqStorePtr->longLengthID == 1)
- llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
- if (seqStorePtr->longLengthID == 2)
- mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
-}
-
-ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
-{
- const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
- const seqStore_t *seqStorePtr = &(zc->seqStore);
- FSE_CTable *CTable_LitLength = zc->litlengthCTable;
- FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
- FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
- U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
- const seqDef *const sequences = seqStorePtr->sequencesStart;
- const BYTE *const ofCodeTable = seqStorePtr->ofCode;
- const BYTE *const llCodeTable = seqStorePtr->llCode;
- const BYTE *const mlCodeTable = seqStorePtr->mlCode;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstCapacity;
- BYTE *op = ostart;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- BYTE *seqHead;
-
- U32 *count;
- S16 *norm;
- U32 *workspace;
- size_t workspaceSize = sizeof(zc->tmpCounters);
- {
- size_t spaceUsed32 = 0;
- count = (U32 *)zc->tmpCounters + spaceUsed32;
- spaceUsed32 += MaxSeq + 1;
- norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
-
- workspace = (U32 *)zc->tmpCounters + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
- }
-
- /* Compress literals */
- {
- const BYTE *const literals = seqStorePtr->litStart;
- size_t const litSize = seqStorePtr->lit - literals;
- size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
- if (ZSTD_isError(cSize))
- return cSize;
- op += cSize;
- }
-
- /* Sequences Header */
- if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
- return ERROR(dstSize_tooSmall);
- if (nbSeq < 0x7F)
- *op++ = (BYTE)nbSeq;
- else if (nbSeq < LONGNBSEQ)
- op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
- else
- op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
- if (nbSeq == 0)
- return op - ostart;
-
- /* seqHead : flags for FSE encoding type */
- seqHead = op++;
-
-#define MIN_SEQ_FOR_DYNAMIC_FSE 64
-#define MAX_SEQ_FOR_STATIC_FSE 1000
-
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
-
- /* CTable for Literal Lengths */
- {
- U32 max = MaxLL;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = llCodeTable[0];
- FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
- LLtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- LLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
- LLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
- if (count[llCodeTable[nbSeq - 1]] > 1) {
- count[llCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
- LLtype = set_compressed;
- }
- }
-
- /* CTable for Offsets */
- {
- U32 max = MaxOff;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = ofCodeTable[0];
- FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
- Offtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- Offtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
- Offtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
- if (count[ofCodeTable[nbSeq - 1]] > 1) {
- count[ofCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
- Offtype = set_compressed;
- }
- }
-
- /* CTable for MatchLengths */
- {
- U32 max = MaxML;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = *mlCodeTable;
- FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
- MLtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- MLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
- MLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
- if (count[mlCodeTable[nbSeq - 1]] > 1) {
- count[mlCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
- MLtype = set_compressed;
- }
- }
-
- *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
- zc->flagStaticTables = 0;
-
- /* Encoding Sequences */
- {
- BIT_CStream_t blockStream;
- FSE_CState_t stateMatchLength;
- FSE_CState_t stateOffsetBits;
- FSE_CState_t stateLitLength;
-
- CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
-
- /* first symbols */
- FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
- FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
- FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
- BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream);
- if (longOffsets) {
- U32 const ofBits = ofCodeTable[nbSeq - 1];
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
- BIT_flushBits(&blockStream);
- }
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
- } else {
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
- }
- BIT_flushBits(&blockStream);
-
- {
- size_t n;
- for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
- BYTE const llCode = llCodeTable[n];
- BYTE const ofCode = ofCodeTable[n];
- BYTE const mlCode = mlCodeTable[n];
- U32 const llBits = LL_bits[llCode];
- U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
- U32 const mlBits = ML_bits[mlCode];
- /* (7)*/ /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
- FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream); /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
- if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_flushBits(&blockStream); /* (7)*/
- BIT_addBits(&blockStream, sequences[n].litLength, llBits);
- if (ZSTD_32bits() && ((llBits + mlBits) > 24))
- BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream); /* (7)*/
- if (longOffsets) {
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[n].offset, extraBits);
- BIT_flushBits(&blockStream); /* (7)*/
- }
- BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
- } else {
- BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
- }
- BIT_flushBits(&blockStream); /* (7)*/
- }
- }
-
- FSE_flushCState(&blockStream, &stateMatchLength);
- FSE_flushCState(&blockStream, &stateOffsetBits);
- FSE_flushCState(&blockStream, &stateLitLength);
-
- {
- size_t const streamSize = BIT_closeCStream(&blockStream);
- if (streamSize == 0)
- return ERROR(dstSize_tooSmall); /* not enough space */
- op += streamSize;
- }
- }
- return op - ostart;
-}
-
-ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
-{
- size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
- size_t const minGain = ZSTD_minGain(srcSize);
- size_t const maxCSize = srcSize - minGain;
- /* If the srcSize <= dstCapacity, then there is enough space to write a
- * raw uncompressed block. Since we ran out of space, the block must not
- * be compressible, so fall back to a raw uncompressed block.
- */
- int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
- int i;
-
- if (ZSTD_isError(cSize) && !uncompressibleError)
- return cSize;
- if (cSize >= maxCSize || uncompressibleError) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return 0;
- }
- /* confirm repcodes */
- for (i = 0; i < ZSTD_REP_NUM; i++)
- zc->rep[i] = zc->repToConfirm[i];
- return cSize;
-}
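
For intuition, the decision above only keeps the entropy-coded block when it undercuts the source size by a minimum gain; otherwise the caller falls back to a raw block. A standalone sketch of that comparison (not part of this file; minGain is taken as a parameter here, the real value comes from ZSTD_minGain() defined earlier):

#include <stddef.h>
#include <stdio.h>

/* Illustrative only: returns 1 when the compressed size is worth keeping,
 * 0 when the caller should emit a raw (uncompressed) block instead. */
static int keep_compressed_block(size_t cSize, size_t srcSize, size_t minGain)
{
	size_t const maxCSize = srcSize - minGain;
	return cSize < maxCSize; /* must save at least minGain bytes */
}

int main(void)
{
	/* 100 KB block with an (assumed) minGain of 1602 bytes. */
	printf("%d\n", keep_compressed_block(95000, 100000, 1602)); /* 1: keep it */
	printf("%d\n", keep_compressed_block(99000, 100000, 1602)); /* 0: raw block */
	return 0;
}
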
-
-/*! ZSTD_storeSeq() :
- Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
- `offsetCode` : distance to match, or 0 == repCode.
- `matchCode` : matchLength - MINMATCH
-*/
-ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
-{
- /* copy Literals */
- ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
- seqStorePtr->lit += litLength;
-
- /* literal Length */
- if (litLength > 0xFFFF) {
- seqStorePtr->longLengthID = 1;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offset = offsetCode + 1;
-
- /* match Length */
- if (matchCode > 0xFFFF) {
- seqStorePtr->longLengthID = 2;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].matchLength = (U16)matchCode;
-
- seqStorePtr->sequences++;
-}
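
The stored fields mirror the sequence format: the literal run length, the offset (recorded as offsetCode + 1), and matchLength - MINMATCH. A minimal standalone sketch of the same bookkeeping, using a simplified record in place of the real seqStore_t:

#include <stddef.h>
#include <stdio.h>

#define SKETCH_MINMATCH 3 /* zstd's minimum match length */

struct sketch_seq {
	unsigned short litLength;
	unsigned int offset;        /* stored as offsetCode + 1, as above */
	unsigned short matchLength; /* stored as matchLength - MINMATCH */
};

static void sketch_store_seq(struct sketch_seq *s, size_t litLength,
			     unsigned int offsetCode, size_t matchCode)
{
	s->litLength = (unsigned short)litLength;
	s->offset = offsetCode + 1;
	s->matchLength = (unsigned short)matchCode;
}

int main(void)
{
	struct sketch_seq s;

	/* 5 literals followed by a 7-byte match; offsetCode is whatever the
	 * caller computed (a repcode index, or distance + ZSTD_REP_MOVE). */
	sketch_store_seq(&s, 5, 42, 7 - SKETCH_MINMATCH);
	printf("lit=%u off=%u mlCode=%u\n", s.litLength, s.offset, s.matchLength);
	return 0;
}
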
-
-/*-*************************************
-* Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes(register size_t val)
-{
- if (ZSTD_isLittleEndian()) {
- if (ZSTD_64bits()) {
- return (__builtin_ctzll((U64)val) >> 3);
- } else { /* 32 bits */
- return (__builtin_ctz((U32)val) >> 3);
- }
- } else { /* Big Endian CPU */
- if (ZSTD_64bits()) {
- return (__builtin_clzll(val) >> 3);
- } else { /* 32 bits */
- return (__builtin_clz((U32)val) >> 3);
- }
- }
-}
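
The counter above turns the XOR of two machine words into a byte count: count trailing zero bits on little-endian (leading on big-endian), then divide by eight. A standalone illustration, assuming a little-endian host and the GCC/Clang __builtin_ctzll builtin:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned nb_common_bytes_le(uint64_t diff)
{
	/* diff must be non-zero: at least one byte differs. */
	return (unsigned)(__builtin_ctzll(diff) >> 3);
}

int main(void)
{
	const char a[8] = "abcdXYZ";
	const char b[8] = "abcdQRS";
	uint64_t wa, wb;

	memcpy(&wa, a, 8);
	memcpy(&wb, b, 8);
	/* The first 4 bytes match, so this prints 4 on a little-endian host. */
	printf("%u\n", nb_common_bytes_le(wa ^ wb));
	return 0;
}
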
-
-static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
-{
- const BYTE *const pStart = pIn;
- const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
-
- while (pIn < pInLoopLimit) {
- size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
- if (!diff) {
- pIn += sizeof(size_t);
- pMatch += sizeof(size_t);
- continue;
- }
- pIn += ZSTD_NbCommonBytes(diff);
- return (size_t)(pIn - pStart);
- }
- if (ZSTD_64bits())
- if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
- pIn += 4;
- pMatch += 4;
- }
- if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
- pIn += 2;
- pMatch += 2;
- }
- if ((pIn < pInLimit) && (*pMatch == *pIn))
- pIn++;
- return (size_t)(pIn - pStart);
-}
-
-/** ZSTD_count_2segments() :
-* can count match length with `ip` & `match` in 2 different segments.
-* convention : on reaching mEnd, the match count continues from iStart
-*/
-static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
-{
- const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
- size_t const matchLength = ZSTD_count(ip, match, vEnd);
- if (match + matchLength != mEnd)
- return matchLength;
- return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
-}
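
A naive standalone sketch of that two-segment convention: compare against the old (dictionary) segment until mEnd is reached, then keep counting from iStart in the current segment. Byte-at-a-time and illustrative only, unlike the word-at-a-time ZSTD_count() above:

#include <stddef.h>
#include <stdio.h>

static size_t count_2segments_sketch(const unsigned char *ip, const unsigned char *match,
				     const unsigned char *iEnd, const unsigned char *mEnd,
				     const unsigned char *iStart)
{
	size_t len = 0;

	while (ip + len < iEnd && match < mEnd && ip[len] == *match) {
		len++;
		match++;
	}
	if (match == mEnd) { /* old segment exhausted: continue from iStart */
		const unsigned char *cont = iStart;
		while (ip + len < iEnd && ip[len] == *cont) {
			len++;
			cont++;
		}
	}
	return len;
}

int main(void)
{
	/* Match "abcab..." against a dictionary segment holding "abc",
	 * then continue against the start of the current segment. */
	const unsigned char dict[] = "abc";
	const unsigned char cur[] = "abcabXX";

	printf("%zu\n", count_2segments_sketch(cur, dict, cur + 7, dict + 3, cur)); /* 5 */
	return 0;
}
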
-
-/*-*************************************
-* Hashes
-***************************************/
-static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
-ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
-
-static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
-static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
-
-static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
-static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
-
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
-static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
-
-static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
-static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
-static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
-
-static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
-{
- switch (mls) {
- // case 3: return ZSTD_hash3Ptr(p, hBits);
- default:
- case 4: return ZSTD_hash4Ptr(p, hBits);
- case 5: return ZSTD_hash5Ptr(p, hBits);
- case 6: return ZSTD_hash6Ptr(p, hBits);
- case 7: return ZSTD_hash7Ptr(p, hBits);
- case 8: return ZSTD_hash8Ptr(p, hBits);
- }
-}
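
The hashes above are multiplicative: multiply the input bytes by a large odd constant, then keep the top h bits of the product. A standalone sketch of the 4-byte variant, reusing the prime4bytes constant, printing the bucket indices two neighbouring inputs get in a 2^12-entry table:

#include <stdint.h>
#include <stdio.h>

static uint32_t hash4_sketch(uint32_t v, uint32_t hBits)
{
	return (v * 2654435761U) >> (32 - hBits); /* same constant as prime4bytes */
}

int main(void)
{
	printf("%u\n", hash4_sketch(0x61626364U, 12)); /* bucket for "abcd" */
	printf("%u\n", hash4_sketch(0x61626365U, 12)); /* bucket for the next value */
	return 0;
}
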
-
-/*-*************************************
-* Fast Scan
-***************************************/
-static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hBits = zc->params.cParams.hashLog;
- const BYTE *const base = zc->base;
- const BYTE *ip = base + zc->nextToUpdate;
- const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while (ip <= iend) {
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashTable = cctx->hashTable;
- U32 const hBits = cctx->params.cParams.hashLog;
- seqStore_t *seqStorePtr = &(cctx->seqStore);
- const BYTE *const base = cctx->base;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE *const lowest = base + lowestIndex;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip == lowest);
- {
- U32 const maxRep = (U32)(ip - lowest);
- if (offset_2 > maxRep)
- offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hBits, mls);
- U32 const curr = (U32)(ip - base);
- U32 const matchIndex = hashTable[h];
- const BYTE *match = base + matchIndex;
- hashTable[h] = curr; /* update hash table */
-
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
- mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- U32 offset;
- if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
- offset = (U32)(ip - match);
- while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
- {
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- } /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
- }
-
- /* save reps for next block */
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- const U32 mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
- }
-}
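
The fast strategy keeps a single hash table that remembers, for each hash, only the most recent position; a candidate is accepted after verifying four bytes and then extended. A toy standalone match finder in that spirit (index 0 is treated as "empty", roughly as positions below lowestIndex are ignored above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HASH_LOG 10

static uint32_t toy_hash4(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, 4);
	return (v * 2654435761U) >> (32 - TOY_HASH_LOG);
}

int main(void)
{
	const unsigned char src[] = "abcdefgh_abcdefgh";
	uint32_t table[1 << TOY_HASH_LOG] = {0};
	size_t i;

	for (i = 0; i + 4 <= sizeof(src) - 1; i++) {
		uint32_t const h = toy_hash4(src + i);
		uint32_t const cand = table[h];

		/* cand == 0 means "no previous position recorded". */
		if (cand != 0 && memcmp(src + cand, src + i, 4) == 0)
			printf("match at %zu, distance %zu\n", i, (size_t)(i - cand));
		table[h] = (uint32_t)i; /* always overwrite with the newest position */
	}
	return 0;
}
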
-
-static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *hashTable = ctx->hashTable;
- const U32 hBits = ctx->params.cParams.hashLog;
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const base = ctx->base;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const lowPrefixPtr = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hBits, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE *match = matchBase + matchIndex;
- const U32 curr = (U32)(ip - base);
- const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
- const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *repMatch = repBase + repIndex;
- size_t mLength;
- hashTable[h] = curr; /* update hash table */
-
- if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
- (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
- const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- {
- const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
- while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- offset = curr - matchIndex;
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
- }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
- hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const curr2 = (U32)(ip - base);
- U32 const repIndex2 = curr2 - offset_2;
- const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
- const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t repLength2 =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
- U32 tmpOffset = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- }
- }
- }
-
- /* save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- U32 const mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-/*-*************************************
-* Double Fast
-***************************************/
-static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
-{
- U32 *const hashLarge = cctx->hashTable;
- U32 const hBitsL = cctx->params.cParams.hashLog;
- U32 *const hashSmall = cctx->chainTable;
- U32 const hBitsS = cctx->params.cParams.chainLog;
- const BYTE *const base = cctx->base;
- const BYTE *ip = base + cctx->nextToUpdate;
- const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while (ip <= iend) {
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
- hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashLong = cctx->hashTable;
- const U32 hBitsL = cctx->params.cParams.hashLog;
- U32 *const hashSmall = cctx->chainTable;
- const U32 hBitsS = cctx->params.cParams.chainLog;
- seqStore_t *seqStorePtr = &(cctx->seqStore);
- const BYTE *const base = cctx->base;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE *const lowest = base + lowestIndex;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip == lowest);
- {
- U32 const maxRep = (U32)(ip - lowest);
- if (offset_2 > maxRep)
- offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
- U32 const curr = (U32)(ip - base);
- U32 const matchIndexL = hashLong[h2];
- U32 const matchIndexS = hashSmall[h];
- const BYTE *matchLong = base + matchIndexL;
- const BYTE *match = base + matchIndexS;
- hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
-
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
- mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- U32 offset;
- if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
- mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
- offset = (U32)(ip - matchLong);
- while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
- ip--;
- matchLong--;
- mLength++;
- } /* catch up */
- } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
- const BYTE *match3 = base + matchIndex3;
- hashLong[h3] = curr + 1;
- if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
- mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
- ip++;
- offset = (U32)(ip - match3);
- while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
- ip--;
- match3--;
- mLength++;
- } /* catch up */
- } else {
- mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
- offset = (U32)(ip - match);
- while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- }
- } else {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
-
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
- curr + 2; /* here because curr+2 could be > iend-8 */
- hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
-
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
- {
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- } /* swap offset_2 <=> offset_1 */
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
- }
-
- /* save reps for next block */
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- const U32 mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashLong = ctx->hashTable;
- U32 const hBitsL = ctx->params.cParams.hashLog;
- U32 *const hashSmall = ctx->chainTable;
- U32 const hBitsS = ctx->params.cParams.chainLog;
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const base = ctx->base;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const lowPrefixPtr = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
- const U32 matchIndex = hashSmall[hSmall];
- const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE *match = matchBase + matchIndex;
-
- const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
- const U32 matchLongIndex = hashLong[hLong];
- const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
- const BYTE *matchLong = matchLongBase + matchLongIndex;
-
- const U32 curr = (U32)(ip - base);
- const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
- const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *repMatch = repBase + repIndex;
- size_t mLength;
- hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
-
- if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
- (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
- const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
- const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
- offset = curr - matchLongIndex;
- while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
- ip--;
- matchLong--;
- mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
-
- } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
- const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
- const BYTE *match3 = match3Base + matchIndex3;
- U32 offset;
- hashLong[h3] = curr + 1;
- if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
- const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
- ip++;
- offset = curr + 1 - matchIndex3;
- while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
- ip--;
- match3--;
- mLength++;
- } /* catch up */
- } else {
- const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
- offset = curr - matchIndex;
- while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- }
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
-
- } else {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
- hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
- hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
- hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const curr2 = (U32)(ip - base);
- U32 const repIndex2 = curr2 - offset_2;
- const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
- const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
- U32 tmpOffset = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- }
- }
- }
-
- /* save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- U32 const mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-/** ZSTD_insertBt1() : add one or multiple positions to tree.
-* ip : assumed <= iend-8 .
-* @return : nb of positions added */
-static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hashLog = zc->params.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const bt = zc->chainTable;
- U32 const btLog = zc->params.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *match;
- const U32 curr = (U32)(ip - base);
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = smallerPtr + 1;
- U32 dummy32; /* to be nullified at the end */
- U32 const windowLow = zc->lowLimit;
- U32 matchEndIdx = curr + 8;
- size_t bestLength = 8;
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- bestLength = matchLength;
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- }
-
- if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
-			break; /* drop it, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
-
- if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
- if (bestLength > 384)
- return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
- if (matchEndIdx > curr + 8)
- return matchEndIdx - curr - 8;
- return 1;
-}
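
The chain table is reinterpreted here as a binary tree: each position p owns the adjacent pair of slots at bt + 2*(p & btMask), one for its "smaller" subtree and one for its "larger" subtree. A standalone sketch of just that indexing (the stored values are made up):

#include <stdint.h>
#include <stdio.h>

#define BT_LOG 4 /* stands in for chainLog - 1 */

int main(void)
{
	uint32_t const btMask = (1u << BT_LOG) - 1;
	uint32_t bt[2u << BT_LOG] = {0}; /* two slots per tracked position */
	uint32_t const curr = 37;        /* some position in the window */

	uint32_t *smallerPtr = bt + 2 * (curr & btMask);
	uint32_t *largerPtr = smallerPtr + 1;

	*smallerPtr = 21; /* index of a smaller-sorting match candidate */
	*largerPtr = 33;  /* index of a larger-sorting match candidate */

	printf("position %u owns slots %u and %u\n", curr,
	       (unsigned)(smallerPtr - bt), (unsigned)(largerPtr - bt));
	return 0;
}
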
-
-static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
- U32 extDict)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hashLog = zc->params.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const bt = zc->chainTable;
- U32 const btLog = zc->params.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const U32 curr = (U32)(ip - base);
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- const U32 windowLow = zc->lowLimit;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
- U32 matchEndIdx = curr + 8;
- U32 dummy32; /* to be nullified at the end */
- size_t bestLength = 0;
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE *match;
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
- if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
-				break; /* drop, to guarantee consistency (misses a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
-
- zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
- return bestLength;
-}
-
-static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target)
- idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
-}
-
-/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
-}
-
-static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7:
- case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target)
- idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
-}
-
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
-}
-
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7:
- case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-/* *********************************
-* Hash Chain
-***********************************/
-#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
-
-/* Update chains up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
-{
- U32 *const hashTable = zc->hashTable;
- const U32 hashLog = zc->params.cParams.hashLog;
- U32 *const chainTable = zc->chainTable;
- const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target) { /* catch up */
- size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
- NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
- hashTable[h] = idx;
- idx++;
- }
-
- zc->nextToUpdate = target;
- return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
-}
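
NEXT_IN_CHAIN turns the chain table into per-hash singly linked lists: hashTable[h] holds the newest position for hash h, and chainTable[pos & mask] holds the previous position that produced the same hash. A standalone sketch of the resulting walk (positions are made up; 0 acts as the list terminator):

#include <stdint.h>
#include <stdio.h>

#define CHAIN_LOG 4
#define CHAIN_MASK ((1u << CHAIN_LOG) - 1)
#define NEXT_IN_CHAIN(d) chain[(d) & CHAIN_MASK]

int main(void)
{
	uint32_t chain[1u << CHAIN_LOG] = {0};
	uint32_t head;

	/* Pretend positions 3, 7 and 12 all hashed to the same bucket. */
	NEXT_IN_CHAIN(7) = 3;
	NEXT_IN_CHAIN(12) = 7;
	head = 12; /* what hashTable[h] would return */

	for (; head != 0; head = NEXT_IN_CHAIN(head))
		printf("candidate position %u\n", head); /* 12, 7, 3 */
	return 0;
}
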
-
-/* inlining is important to hardwire a hot branch (template emulation) */
-FORCE_INLINE
-size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
- const U32 extDict)
-{
- U32 *const chainTable = zc->chainTable;
- const U32 chainSize = (1 << zc->params.cParams.chainLog);
- const U32 chainMask = chainSize - 1;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const U32 lowLimit = zc->lowLimit;
- const U32 curr = (U32)(ip - base);
- const U32 minChain = curr > chainSize ? curr - chainSize : 0;
- int nbAttempts = maxNbAttempts;
- size_t ml = EQUAL_READ32 - 1;
-
- /* HC4 match finder */
- U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
-
- for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
- const BYTE *match;
- size_t currMl = 0;
- if ((!extDict) || matchIndex >= dictLimit) {
- match = base + matchIndex;
- if (match[ml] == ip[ml]) /* potentially better */
- currMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex;
- if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
- }
-
- /* save best solution */
- if (currMl > ml) {
- ml = currMl;
- *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
- if (ip + currMl == iLimit)
- break; /* best possible, and avoid read overflow*/
- }
-
- if (matchIndex <= minChain)
- break;
- matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
- }
-
- return ml;
-}
-
-FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
- case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
- case 7:
- case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
- }
-}
-
-FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
- case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
- case 7:
- case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
- }
-}
-
-/* *******************************
-* Common parser - lazy strategy
-*********************************/
-FORCE_INLINE
-void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base + ctx->dictLimit;
-
- U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
- U32 const mls = ctx->params.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
-
- /* init */
- ip += (ip == base);
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- {
- U32 const maxRep = (U32)(ip - base);
- if (offset_2 > maxRep)
- savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- savedOffset = offset_1, offset_1 = 0;
- }
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength = 0;
- size_t offset = 0;
- const BYTE *start = ip + 1;
-
- /* check repCode */
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
- /* repcode : we take it */
- matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- if (depth == 0)
- goto _storeSequence;
- }
-
- /* first search (depth 0) */
- {
- size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset = offsetFound;
- }
-
- if (matchLength < EQUAL_READ32) {
- ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth >= 1)
- while (ip < ilimit) {
- ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
- size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
- }
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- }
- }
-
- /* let's find an even better one */
- if ((depth == 2) && (ip < ilimit)) {
- ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
- size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- int const gain2 = (int)(ml2 * 4);
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = ml2, offset = 0, start = ip;
- }
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- }
- }
- }
- break; /* nothing found : store previous solution */
- }
-
- /* NOTE:
- * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
- * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
- * overflows the pointer, which is undefined behavior.
- */
- /* catch up */
- if (offset) {
- while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
- (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
- {
- start--;
- matchLength++;
- }
- offset_2 = offset_1;
- offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
-
- /* store sequence */
-_storeSequence:
- {
- size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
- offset = offset_2;
- offset_2 = offset_1;
- offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
-
- /* Save reps for next block */
- ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
- ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
-
-static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
-
-static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
-
-static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
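
These one-line wrappers are the file's template-emulation idiom: a single FORCE_INLINE generic routine takes searchMethod and depth as parameters, and each wrapper passes compile-time constants so the compiler can specialize the hot loop per strategy. A standalone sketch of the pattern, assuming the GCC/Clang always_inline attribute:

#include <stdio.h>

/* Stand-in for the real match-search loop: the constant arguments let the
 * compiler fold the branches away in each specialized wrapper. */
static inline __attribute__((always_inline))
int search_generic(int value, int searchMethod, int depth)
{
	return value * (searchMethod ? 10 : 1) + depth;
}

static int search_greedy(int v)  { return search_generic(v, 0, 0); }
static int search_lazy2(int v)   { return search_generic(v, 0, 2); }
static int search_btlazy2(int v) { return search_generic(v, 1, 2); }

int main(void)
{
	printf("%d %d %d\n", search_greedy(3), search_lazy2(3), search_btlazy2(3));
	return 0;
}
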
-
-FORCE_INLINE
-void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const U32 dictLimit = ctx->dictLimit;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const dictStart = dictBase + ctx->lowLimit;
-
- const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
- const U32 mls = ctx->params.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ip += (ip == prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength = 0;
- size_t offset = 0;
- const BYTE *start = ip + 1;
- U32 curr = (U32)(ip - base);
-
- /* check repCode */
- {
- const U32 repIndex = (U32)(curr + 1 - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
-					/* repcode detected, we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength =
- ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
- if (depth == 0)
- goto _storeSequence;
- }
- }
-
- /* first search (depth 0) */
- {
- size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset = offsetFound;
- }
-
- if (matchLength < EQUAL_READ32) {
- ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth >= 1)
- while (ip < ilimit) {
- ip++;
- curr++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(curr - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
- EQUAL_READ32;
- int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- }
- }
-
- /* search match, depth 1 */
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- }
- }
-
- /* let's find an even better one */
- if ((depth == 2) && (ip < ilimit)) {
- ip++;
- curr++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(curr - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
- repEnd, prefixStart) +
- EQUAL_READ32;
- int gain2 = (int)(repLength * 4);
- int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- }
- }
-
- /* search match, depth 2 */
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- }
- }
- }
- break; /* nothing found : store previous solution */
- }
-
- /* catch up */
- if (offset) {
- U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
- const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
- const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
- while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
- start--;
- match--;
- matchLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
-
- /* store sequence */
- _storeSequence : {
- size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while (ip <= ilimit) {
- const U32 repIndex = (U32)((ip - base) - offset_2);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
-				/* repcode detected, we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
- offset = offset_2;
- offset_2 = offset_1;
- offset_1 = (U32)offset; /* swap offset history */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- break;
- }
- }
-
- /* Save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
-
-static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
-}
-
-static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
-}
-
-static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
-}
-
-/* The optimal parser */
-#include "zstd_opt.h"
-
-static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
-
-static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
-{
- static const ZSTD_blockCompressor blockCompressor[2][8] = {
- {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
- ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
- {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
- ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
-
- return blockCompressor[extDict][(U32)strat];
-}
-
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
- const BYTE *const base = zc->base;
- const BYTE *const istart = (const BYTE *)src;
- const U32 curr = (U32)(istart - base);
- if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
- return 0; /* don't even attempt compression below a certain srcSize */
- ZSTD_resetSeqStore(&(zc->seqStore));
- if (curr > zc->nextToUpdate + 384)
-		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* the tree is not updated after finding very long rep matches; bound the catch-up work */
- blockCompressor(zc, src, srcSize);
- return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
-}
-
-/*! ZSTD_compress_generic() :
-* Compress a chunk of data into one or multiple blocks.
-* All blocks will be terminated, all input will be consumed.
-* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
-* The frame is supposed to be already started (header already produced)
-* @return : compressed size, or an error code
-*/
-static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
-{
- size_t blockSize = cctx->blockSize;
- size_t remaining = srcSize;
- const BYTE *ip = (const BYTE *)src;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- U32 const maxDist = 1 << cctx->params.cParams.windowLog;
-
- if (cctx->params.fParams.checksumFlag && srcSize)
- xxh64_update(&cctx->xxhState, src, srcSize);
-
- while (remaining) {
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
- size_t cSize;
-
- if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
- return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
- if (remaining < blockSize)
- blockSize = remaining;
-
- /* preemptive overflow correction */
- if (cctx->lowLimit > (3U << 29)) {
- U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
- U32 const curr = (U32)(ip - cctx->base);
- U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
- U32 const correction = curr - newCurr;
- ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
- ZSTD_reduceIndex(cctx, correction);
- cctx->base += correction;
- cctx->dictBase += correction;
- cctx->lowLimit -= correction;
- cctx->dictLimit -= correction;
- if (cctx->nextToUpdate < correction)
- cctx->nextToUpdate = 0;
- else
- cctx->nextToUpdate -= correction;
- }
-
- if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
- /* enforce maxDist */
- U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
- if (cctx->lowLimit < newLowLimit)
- cctx->lowLimit = newLowLimit;
- if (cctx->dictLimit < cctx->lowLimit)
- cctx->dictLimit = cctx->lowLimit;
- }
-
- cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
- if (ZSTD_isError(cSize))
- return cSize;
-
- if (cSize == 0) { /* block is not compressible */
- U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
- if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
-			ZSTD_writeLE32(op, cBlockHeader24); /* no problem, the 4th byte will be overwritten */
- memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
- cSize = ZSTD_blockHeaderSize + blockSize;
- } else {
- U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
- ZSTD_writeLE24(op, cBlockHeader24);
- cSize += ZSTD_blockHeaderSize;
- }
-
- remaining -= blockSize;
- dstCapacity -= cSize;
- ip += blockSize;
- op += cSize;
- }
-
- if (lastFrameChunk && (op > ostart))
- cctx->stage = ZSTDcs_ending;
- return op - ostart;
-}
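Editor's note, not part of the patch: the 3-byte block header assembled in the loop above packs the last-block flag (bit 0), the block type (bits 1-2) and the raw or compressed size (bits 3-23). A worked example with illustrative values, relying only on the encoding visible above (bt_compressed is 2 on the wire):

    U32 const cBlockHeader24 = 1 /* lastBlock */ + (2 /* bt_compressed */ << 1) + (1000 /* cSize */ << 3);
    /* = 0x1F45 ; ZSTD_writeLE24() would then emit the bytes 0x45, 0x1F, 0x00 */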
-
-static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
-{
- BYTE *const op = (BYTE *)dst;
- U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
- U32 const checksumFlag = params.fParams.checksumFlag > 0;
- U32 const windowSize = 1U << params.cParams.windowLog;
- U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
- BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
- U32 const fcsCode =
- params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
- BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
- size_t pos;
-
- if (dstCapacity < ZSTD_frameHeaderSize_max)
- return ERROR(dstSize_tooSmall);
-
- ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
- op[4] = frameHeaderDecriptionByte;
- pos = 5;
- if (!singleSegment)
- op[pos++] = windowLogByte;
- switch (dictIDSizeCode) {
- default: /* impossible */
- case 0: break;
- case 1:
- op[pos] = (BYTE)(dictID);
- pos++;
- break;
- case 2:
- ZSTD_writeLE16(op + pos, (U16)dictID);
- pos += 2;
- break;
- case 3:
- ZSTD_writeLE32(op + pos, dictID);
- pos += 4;
- break;
- }
- switch (fcsCode) {
- default: /* impossible */
- case 0:
- if (singleSegment)
- op[pos++] = (BYTE)(pledgedSrcSize);
- break;
- case 1:
- ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
- pos += 2;
- break;
- case 2:
- ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
- pos += 4;
- break;
- case 3:
- ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
- pos += 8;
- break;
- }
- return pos;
-}
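Editor's note, not part of the patch: the frame-header descriptor byte built above ORs four small fields together (dictIDSizeCode in bits 0-1, checksumFlag in bit 2, singleSegment in bit 5, fcsCode in bits 6-7). A worked example with illustrative field values:

    /* dictIDSizeCode = 2 (16-bit dictID), checksumFlag = 1, singleSegment = 0, fcsCode = 2 (32-bit field) */
    BYTE const fhd = (BYTE)(2 + (1 << 2) + (0 << 5) + (2 << 6));   /* = 0x86 */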
-
-static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
-{
- const BYTE *const ip = (const BYTE *)src;
- size_t fhSize = 0;
-
- if (cctx->stage == ZSTDcs_created)
- return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
-
- if (frame && (cctx->stage == ZSTDcs_init)) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
- if (ZSTD_isError(fhSize))
- return fhSize;
- dstCapacity -= fhSize;
- dst = (char *)dst + fhSize;
- cctx->stage = ZSTDcs_ongoing;
- }
-
- /* Check if blocks follow each other */
- if (src != cctx->nextSrc) {
- /* not contiguous */
- ptrdiff_t const delta = cctx->nextSrc - ip;
- cctx->lowLimit = cctx->dictLimit;
- cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
- cctx->dictBase = cctx->base;
- cctx->base -= delta;
- cctx->nextToUpdate = cctx->dictLimit;
- if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
- cctx->lowLimit = cctx->dictLimit; /* too small extDict */
- }
-
- /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
- if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
- cctx->lowLimit = lowLimitMax;
- }
-
- cctx->nextSrc = ip + srcSize;
-
- if (srcSize) {
- size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
- : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
- if (ZSTD_isError(cSize))
- return cSize;
- return cSize + fhSize;
- } else
- return fhSize;
-}
-
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
-}
-
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
-
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
- if (srcSize > blockSizeMax)
- return ERROR(srcSize_wrong);
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
-}
-
-/*! ZSTD_loadDictionaryContent() :
- * @return : 0, or an error code
- */
-static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
-{
- const BYTE *const ip = (const BYTE *)src;
- const BYTE *const iend = ip + srcSize;
-
- /* input becomes curr prefix */
- zc->lowLimit = zc->dictLimit;
- zc->dictLimit = (U32)(zc->nextSrc - zc->base);
- zc->dictBase = zc->base;
- zc->base += ip - zc->nextSrc;
- zc->nextToUpdate = zc->dictLimit;
- zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
-
- zc->nextSrc = iend;
- if (srcSize <= HASH_READ_SIZE)
- return 0;
-
- switch (zc->params.cParams.strategy) {
- case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
-
- case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
-
- case ZSTD_greedy:
- case ZSTD_lazy:
- case ZSTD_lazy2:
- if (srcSize >= HASH_READ_SIZE)
- ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
- break;
-
- case ZSTD_btlazy2:
- case ZSTD_btopt:
- case ZSTD_btopt2:
- if (srcSize >= HASH_READ_SIZE)
- ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
- break;
-
- default:
- return ERROR(GENERIC); /* strategy doesn't exist; impossible */
- }
-
- zc->nextToUpdate = (U32)(iend - zc->base);
- return 0;
-}
-
-/* Dictionaries that assign zero probability to symbols that show up cause problems
-   during FSE encoding. Refuse dictionaries that assign zero probability to symbols
-   that we may encounter during compression.
-   NOTE: This behavior is not standard and could be improved in the future. */
-static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
-{
- U32 s;
- if (dictMaxSymbolValue < maxSymbolValue)
- return ERROR(dictionary_corrupted);
- for (s = 0; s <= maxSymbolValue; ++s) {
- if (normalizedCounter[s] == 0)
- return ERROR(dictionary_corrupted);
- }
- return 0;
-}
-
-/* Dictionary format :
- * See :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
- */
-/*! ZSTD_loadZstdDictionary() :
- * @return : 0, or an error code
- * assumptions : magic number already checked,
- *               dictSize > 8
- */
-static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
-{
- const BYTE *dictPtr = (const BYTE *)dict;
- const BYTE *const dictEnd = dictPtr + dictSize;
- short offcodeNCount[MaxOff + 1];
- unsigned offcodeMaxValue = MaxOff;
-
- dictPtr += 4; /* skip magic number */
- cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
- dictPtr += 4;
-
- {
- size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
- if (HUF_isError(hufHeaderSize))
- return ERROR(dictionary_corrupted);
- dictPtr += hufHeaderSize;
- }
-
- {
- unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(offcodeHeaderSize))
- return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog)
- return ERROR(dictionary_corrupted);
- /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
- CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += offcodeHeaderSize;
- }
-
- {
- short matchlengthNCount[MaxML + 1];
- unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(matchlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog)
- return ERROR(dictionary_corrupted);
- /* Every match length code must have non-zero probability */
- CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
- CHECK_E(
- FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += matchlengthHeaderSize;
- }
-
- {
- short litlengthNCount[MaxLL + 1];
- unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(litlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog)
- return ERROR(dictionary_corrupted);
- /* Every literal length code must have non-zero probability */
- CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
- CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += litlengthHeaderSize;
- }
-
- if (dictPtr + 12 > dictEnd)
- return ERROR(dictionary_corrupted);
- cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
- cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
- cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
- dictPtr += 12;
-
- {
- size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
- U32 offcodeMax = MaxOff;
- if (dictContentSize <= ((U32)-1) - 128 KB) {
- U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
- offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
- }
- /* All offset values <= dictContentSize + 128 KB must be representable */
- CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
-		/* All repCodes must be <= dictContentSize and != 0 */
- {
- U32 u;
- for (u = 0; u < 3; u++) {
- if (cctx->rep[u] == 0)
- return ERROR(dictionary_corrupted);
- if (cctx->rep[u] > dictContentSize)
- return ERROR(dictionary_corrupted);
- }
- }
-
- cctx->flagStaticTables = 1;
- cctx->flagStaticHufTable = HUF_repeat_valid;
- return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
- }
-}
-
-/** ZSTD_compress_insertDictionary() :
-* @return : 0, or an error code */
-static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
-{
- if ((dict == NULL) || (dictSize <= 8))
- return 0;
-
- /* dict as pure content */
- if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
- return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
-
- /* dict as zstd dictionary */
- return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
-}
-
-/*! ZSTD_compressBegin_internal() :
-* @return : 0, or an error code */
-static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
-{
- ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
- CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
- return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
-}
-
-/*! ZSTD_compressBegin_advanced() :
-* @return : 0, or an error code */
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
- /* compression parameters verification and optimization */
- CHECK_F(ZSTD_checkCParams(params.cParams));
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
-}
-
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
-{
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
-}
-
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
-
-/*! ZSTD_writeEpilogue() :
-* Ends a frame.
-* @return : nb of bytes written into dst (or an error code) */
-static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- size_t fhSize = 0;
-
- if (cctx->stage == ZSTDcs_created)
- return ERROR(stage_wrong); /* init missing */
-
- /* special case : empty frame */
- if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
- if (ZSTD_isError(fhSize))
- return fhSize;
- dstCapacity -= fhSize;
- op += fhSize;
- cctx->stage = ZSTDcs_ongoing;
- }
-
- if (cctx->stage != ZSTDcs_ending) {
- /* write one last empty block, make it the "last" block */
- U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
- if (dstCapacity < 4)
- return ERROR(dstSize_tooSmall);
- ZSTD_writeLE32(op, cBlockHeader24);
- op += ZSTD_blockHeaderSize;
- dstCapacity -= ZSTD_blockHeaderSize;
- }
-
- if (cctx->params.fParams.checksumFlag) {
- U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
- if (dstCapacity < 4)
- return ERROR(dstSize_tooSmall);
- ZSTD_writeLE32(op, checksum);
- op += 4;
- }
-
- cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op - ostart;
-}
-
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t endResult;
- size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
- if (ZSTD_isError(cSize))
- return cSize;
- endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
- if (ZSTD_isError(endResult))
- return endResult;
- return cSize + endResult;
-}
-
-static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params)
-{
- CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params)
-{
- return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
-}
-
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
-{
- return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
-}
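Editor's sketch, not part of the patch: a minimal example of how a kernel caller might drive the one-shot path above through the exported API (ZSTD_getParams, ZSTD_CCtxWorkspaceBound, ZSTD_initCCtx, ZSTD_compressCCtx). The function name, compression level 3, use of vmalloc() and the errno mapping are illustrative assumptions only; the caller is also assumed to size dst, e.g. with ZSTD_compressBound(srcSize).

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int example_compress_once(const void *src, size_t srcSize,
				 void *dst, size_t dstCapacity, size_t *cSizePtr)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
	size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *wksp = vmalloc(wkspSize);          /* workspace backs the whole CCtx */
	ZSTD_CCtx *cctx;
	size_t cSize;

	if (!wksp)
		return -ENOMEM;
	cctx = ZSTD_initCCtx(wksp, wkspSize);
	if (!cctx) {
		vfree(wksp);
		return -EINVAL;
	}
	cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
	vfree(wksp);
	if (ZSTD_isError(cSize))
		return -EIO;
	*cSizePtr = cSize;
	return 0;
}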
-
-/* ===== Dictionary API ===== */
-
-struct ZSTD_CDict_s {
- void *dictBuffer;
- const void *dictContent;
- size_t dictContentSize;
- ZSTD_CCtx *refContext;
-}; /* typedef'd to ZSTD_CDict within "zstd.h" */
-
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
-
-static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
-{
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- {
- ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
- ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
-
- if (!cdict || !cctx) {
- ZSTD_free(cdict, customMem);
- ZSTD_freeCCtx(cctx);
- return NULL;
- }
-
- if ((byReference) || (!dictBuffer) || (!dictSize)) {
- cdict->dictBuffer = NULL;
- cdict->dictContent = dictBuffer;
- } else {
- void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
- if (!internalBuffer) {
- ZSTD_free(cctx, customMem);
- ZSTD_free(cdict, customMem);
- return NULL;
- }
- memcpy(internalBuffer, dictBuffer, dictSize);
- cdict->dictBuffer = internalBuffer;
- cdict->dictContent = internalBuffer;
- }
-
- {
- size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
- if (ZSTD_isError(errorCode)) {
- ZSTD_free(cdict->dictBuffer, customMem);
- ZSTD_free(cdict, customMem);
- ZSTD_freeCCtx(cctx);
- return NULL;
- }
- }
-
- cdict->refContext = cctx;
- cdict->dictContentSize = dictSize;
- return cdict;
- }
-}
-
-ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
-}
-
-size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
-{
- if (cdict == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = cdict->refContext->customMem;
- ZSTD_freeCCtx(cdict->refContext);
- ZSTD_free(cdict->dictBuffer, cMem);
- ZSTD_free(cdict, cMem);
- return 0;
- }
-}
-
-static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
-
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
-{
- if (cdict->dictContentSize)
- CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
- else {
- ZSTD_parameters params = cdict->refContext->params;
- params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
- CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
- }
- return 0;
-}
-
-/*! ZSTD_compress_usingCDict() :
-* Compression using a digested Dictionary.
-* Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
-* Note that compression level is decided during dictionary creation */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
-{
- CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
-
- if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
- cctx->params.fParams.contentSizeFlag = 1;
- cctx->frameContentSize = srcSize;
- } else {
- cctx->params.fParams.contentSizeFlag = 0;
- }
-
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
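Editor's sketch, not part of the patch: reusing a digested dictionary with ZSTD_initCDict() and ZSTD_compress_usingCDict(), sizing each workspace with the *WorkspaceBound() helpers shown above. The names, compression level 3, vmalloc() and the errno values are illustrative assumptions.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int example_compress_with_cdict(const void *dictBuf, size_t dictSize,
				       const void *src, size_t srcSize,
				       void *dst, size_t dstCapacity, size_t *cSizePtr)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, dictSize);
	size_t const cdictWkspSize = ZSTD_CDictWorkspaceBound(params.cParams);
	size_t const cctxWkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *cdictWksp = vmalloc(cdictWkspSize);
	void *cctxWksp = vmalloc(cctxWkspSize);
	ZSTD_CDict *cdict;
	ZSTD_CCtx *cctx;
	size_t cSize;
	int err = -ENOMEM;

	if (!cdictWksp || !cctxWksp)
		goto out;
	/* ZSTD_initCDict() references dictBuf directly; it must stay valid while cdict is in use */
	cdict = ZSTD_initCDict(dictBuf, dictSize, params, cdictWksp, cdictWkspSize);
	cctx = ZSTD_initCCtx(cctxWksp, cctxWkspSize);
	err = -EINVAL;
	if (!cdict || !cctx)
		goto out;
	cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
	if (ZSTD_isError(cSize)) {
		err = -EIO;
		goto out;
	}
	*cSizePtr = cSize;
	err = 0;
out:
	vfree(cctxWksp);
	vfree(cdictWksp);
	return err;
}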
-
-/* ******************************************************************
-* Streaming
-********************************************************************/
-
-typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
-
-struct ZSTD_CStream_s {
- ZSTD_CCtx *cctx;
- ZSTD_CDict *cdictLocal;
- const ZSTD_CDict *cdict;
- char *inBuff;
- size_t inBuffSize;
- size_t inToCompress;
- size_t inBuffPos;
- size_t inBuffTarget;
- size_t blockSize;
- char *outBuff;
- size_t outBuffSize;
- size_t outBuffContentSize;
- size_t outBuffFlushedSize;
- ZSTD_cStreamStage stage;
- U32 checksum;
- U32 frameEnded;
- U64 pledgedSrcSize;
- U64 inputProcessed;
- ZSTD_parameters params;
- ZSTD_customMem customMem;
-}; /* typedef'd to ZSTD_CStream within "zstd.h" */
-
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
-{
- size_t const inBuffSize = (size_t)1 << cParams.windowLog;
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
- size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
-
- return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
-}
-
-ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
-{
- ZSTD_CStream *zcs;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
- if (zcs == NULL)
- return NULL;
- memset(zcs, 0, sizeof(ZSTD_CStream));
- memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
- zcs->cctx = ZSTD_createCCtx_advanced(customMem);
- if (zcs->cctx == NULL) {
- ZSTD_freeCStream(zcs);
- return NULL;
- }
- return zcs;
-}
-
-size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
-{
- if (zcs == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = zcs->customMem;
- ZSTD_freeCCtx(zcs->cctx);
- zcs->cctx = NULL;
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = NULL;
- ZSTD_free(zcs->inBuff, cMem);
- zcs->inBuff = NULL;
- ZSTD_free(zcs->outBuff, cMem);
- zcs->outBuff = NULL;
- ZSTD_free(zcs, cMem);
- return 0;
- }
-}
-
-/*====== Initialization ======*/
-
-size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
-size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
-
-static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
-{
- if (zcs->inBuffSize == 0)
-		return ERROR(stage_wrong); /* zcs has not been initialized at least once => can't reset */
-
- if (zcs->cdict)
- CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
- else
- CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
-
- zcs->inToCompress = 0;
- zcs->inBuffPos = 0;
- zcs->inBuffTarget = zcs->blockSize;
- zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_load;
- zcs->frameEnded = 0;
- zcs->pledgedSrcSize = pledgedSrcSize;
- zcs->inputProcessed = 0;
- return 0; /* ready to go */
-}
-
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
-{
-
- zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
-
- return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
-}
-
-static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
- /* allocate buffers */
- {
- size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
- if (zcs->inBuffSize < neededInBuffSize) {
- zcs->inBuffSize = neededInBuffSize;
- ZSTD_free(zcs->inBuff, zcs->customMem);
- zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
- if (zcs->inBuff == NULL)
- return ERROR(memory_allocation);
- }
- zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
- }
- if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
- zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
- ZSTD_free(zcs->outBuff, zcs->customMem);
- zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
- if (zcs->outBuff == NULL)
- return ERROR(memory_allocation);
- }
-
- if (dict && dictSize >= 8) {
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
- if (zcs->cdictLocal == NULL)
- return ERROR(memory_allocation);
- zcs->cdict = zcs->cdictLocal;
- } else
- zcs->cdict = NULL;
-
- zcs->checksum = params.fParams.checksumFlag > 0;
- zcs->params = params;
-
- return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
-}
-
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
- if (zcs) {
- size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
- if (ZSTD_isError(code)) {
- return NULL;
- }
- }
- return zcs;
-}
-
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
- ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
- if (zcs) {
- zcs->cdict = cdict;
- if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
- return NULL;
- }
- }
- return zcs;
-}
-
-/*====== Compression ======*/
-
-typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
-
-ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const length = MIN(dstCapacity, srcSize);
- memcpy(dst, src, length);
- return length;
-}
-
-static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
-{
- U32 someMoreWork = 1;
- const char *const istart = (const char *)src;
- const char *const iend = istart + *srcSizePtr;
- const char *ip = istart;
- char *const ostart = (char *)dst;
- char *const oend = ostart + *dstCapacityPtr;
- char *op = ostart;
-
- while (someMoreWork) {
- switch (zcs->stage) {
- case zcss_init:
-			return ERROR(init_missing); /* call ZBUFF_compressInit() first! */
-
- case zcss_load:
- /* complete inBuffer */
- {
- size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
- size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
- zcs->inBuffPos += loaded;
- ip += loaded;
- if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
- someMoreWork = 0;
- break; /* not enough input to get a full block : stop there, wait for more */
- }
- }
- /* compress curr block (note : this stage cannot be stopped in the middle) */
- {
- void *cDst;
- size_t cSize;
- size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
- size_t oSize = oend - op;
- if (oSize >= ZSTD_compressBound(iSize))
- cDst = op; /* compress directly into output buffer (avoid flush stage) */
- else
- cDst = zcs->outBuff, oSize = zcs->outBuffSize;
- cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
- : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
- if (ZSTD_isError(cSize))
- return cSize;
- if (flush == zsf_end)
- zcs->frameEnded = 1;
- /* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
- if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
- zcs->inToCompress = zcs->inBuffPos;
- if (cDst == op) {
- op += cSize;
- break;
- } /* no need to flush */
- zcs->outBuffContentSize = cSize;
- zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_flush; /* pass-through to flush stage */
- }
- /* fall through */
-
- case zcss_flush: {
- size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
- op += flushed;
- zcs->outBuffFlushedSize += flushed;
- if (toFlush != flushed) {
- someMoreWork = 0;
- break;
- } /* dst too small to store flushed data : stop there */
- zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_load;
- break;
- }
-
- case zcss_final:
- someMoreWork = 0; /* do nothing */
- break;
-
- default:
- return ERROR(GENERIC); /* impossible */
- }
- }
-
- *srcSizePtr = ip - istart;
- *dstCapacityPtr = op - ostart;
- zcs->inputProcessed += *srcSizePtr;
- if (zcs->frameEnded)
- return 0;
- {
- size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
- if (hintInSize == 0)
- hintInSize = zcs->blockSize;
- return hintInSize;
- }
-}
-
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
-{
- size_t sizeRead = input->size - input->pos;
- size_t sizeWritten = output->size - output->pos;
- size_t const result =
- ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
- input->pos += sizeRead;
- output->pos += sizeWritten;
- return result;
-}
-
-/*====== Finalize ======*/
-
-/*! ZSTD_flushStream() :
-* @return : amount of data remaining to flush */
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
-{
- size_t srcSize = 0;
- size_t sizeWritten = output->size - output->pos;
- size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
- &srcSize, /* use a valid src address instead of NULL */
- zsf_flush);
- output->pos += sizeWritten;
- if (ZSTD_isError(result))
- return result;
- return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
-}
-
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
-{
- BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
- BYTE *const oend = (BYTE *)(output->dst) + output->size;
- BYTE *op = ostart;
-
- if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
- return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
-
- if (zcs->stage != zcss_final) {
- /* flush whatever remains */
- size_t srcSize = 0;
- size_t sizeWritten = output->size - output->pos;
- size_t const notEnded =
- ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
- size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- op += sizeWritten;
- if (remainingToFlush) {
- output->pos += sizeWritten;
- return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
- }
- /* create epilogue */
- zcs->stage = zcss_final;
- zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
- 0); /* write epilogue, including final empty block, into outBuff */
- }
-
- /* flush epilogue */
- {
- size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
- op += flushed;
- zcs->outBuffFlushedSize += flushed;
- output->pos += op - ostart;
- if (toFlush == flushed)
- zcs->stage = zcss_init; /* end reached */
- return toFlush - flushed;
- }
-}
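Editor's sketch, not part of the patch: driving the streaming API above in a single pass, using only the exported symbols (ZSTD_CStreamWorkspaceBound, ZSTD_initCStream, ZSTD_compressStream, ZSTD_endStream). The function name, level 3, vmalloc() and errno values are illustrative assumptions; dstCapacity is assumed to be at least ZSTD_compressBound(srcSize) so the flush loops terminate.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int example_compress_stream(const void *src, size_t srcSize,
				   void *dst, size_t dstCapacity, size_t *cSizePtr)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
	void *wksp = vmalloc(wkspSize);
	ZSTD_CStream *zcs;
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
	size_t ret;

	if (!wksp)
		return -ENOMEM;
	zcs = ZSTD_initCStream(params, srcSize, wksp, wkspSize);
	if (!zcs) {
		vfree(wksp);
		return -EINVAL;
	}
	while (in.pos < in.size) {                       /* feed all input */
		ret = ZSTD_compressStream(zcs, &out, &in);
		if (ZSTD_isError(ret))
			goto err;
	}
	do {                                             /* flush epilogue until done */
		ret = ZSTD_endStream(zcs, &out);         /* returns bytes left to flush */
		if (ZSTD_isError(ret))
			goto err;
	} while (ret != 0);
	*cSizePtr = out.pos;
	vfree(wksp);
	return 0;
err:
	vfree(wksp);
	return -EIO;
}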
-
-/*-===== Pre-defined compression levels =====-*/
-
-#define ZSTD_DEFAULT_CLEVEL 1
-#define ZSTD_MAX_CLEVEL 22
-int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
-
-static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
- {
- /* "default" */
- /* W, C, H, S, L, TL, strat */
- {18, 12, 12, 1, 7, 16, ZSTD_fast}, /* level 0 - never used */
- {19, 13, 14, 1, 7, 16, ZSTD_fast}, /* level 1 */
- {19, 15, 16, 1, 6, 16, ZSTD_fast}, /* level 2 */
- {20, 16, 17, 1, 5, 16, ZSTD_dfast}, /* level 3.*/
- {20, 18, 18, 1, 5, 16, ZSTD_dfast}, /* level 4.*/
- {20, 15, 18, 3, 5, 16, ZSTD_greedy}, /* level 5 */
- {21, 16, 19, 2, 5, 16, ZSTD_lazy}, /* level 6 */
- {21, 17, 20, 3, 5, 16, ZSTD_lazy}, /* level 7 */
- {21, 18, 20, 3, 5, 16, ZSTD_lazy2}, /* level 8 */
- {21, 20, 20, 3, 5, 16, ZSTD_lazy2}, /* level 9 */
- {21, 19, 21, 4, 5, 16, ZSTD_lazy2}, /* level 10 */
- {22, 20, 22, 4, 5, 16, ZSTD_lazy2}, /* level 11 */
- {22, 20, 22, 5, 5, 16, ZSTD_lazy2}, /* level 12 */
- {22, 21, 22, 5, 5, 16, ZSTD_lazy2}, /* level 13 */
- {22, 21, 22, 6, 5, 16, ZSTD_lazy2}, /* level 14 */
- {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
- {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
- {23, 21, 22, 4, 5, 24, ZSTD_btopt}, /* level 17 */
- {23, 23, 22, 6, 5, 32, ZSTD_btopt}, /* level 18 */
- {23, 23, 22, 6, 3, 48, ZSTD_btopt}, /* level 19 */
- {25, 25, 23, 7, 3, 64, ZSTD_btopt2}, /* level 20 */
- {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
- {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
- },
- {
- /* for srcSize <= 256 KB */
- /* W, C, H, S, L, T, strat */
- {0, 0, 0, 0, 0, 0, ZSTD_fast}, /* level 0 - not used */
- {18, 13, 14, 1, 6, 8, ZSTD_fast}, /* level 1 */
- {18, 14, 13, 1, 5, 8, ZSTD_dfast}, /* level 2 */
- {18, 16, 15, 1, 5, 8, ZSTD_dfast}, /* level 3 */
- {18, 15, 17, 1, 5, 8, ZSTD_greedy}, /* level 4.*/
- {18, 16, 17, 4, 5, 8, ZSTD_greedy}, /* level 5.*/
- {18, 16, 17, 3, 5, 8, ZSTD_lazy}, /* level 6.*/
- {18, 17, 17, 4, 4, 8, ZSTD_lazy}, /* level 7 */
- {18, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */
- {18, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */
- {18, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */
- {18, 18, 17, 6, 4, 8, ZSTD_lazy2}, /* level 11.*/
- {18, 18, 17, 7, 4, 8, ZSTD_lazy2}, /* level 12.*/
- {18, 19, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13 */
- {18, 18, 18, 4, 4, 16, ZSTD_btopt}, /* level 14.*/
- {18, 18, 18, 4, 3, 16, ZSTD_btopt}, /* level 15.*/
- {18, 19, 18, 6, 3, 32, ZSTD_btopt}, /* level 16.*/
- {18, 19, 18, 8, 3, 64, ZSTD_btopt}, /* level 17.*/
- {18, 19, 18, 9, 3, 128, ZSTD_btopt}, /* level 18.*/
- {18, 19, 18, 10, 3, 256, ZSTD_btopt}, /* level 19.*/
- {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
- {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
- {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
- },
- {
- /* for srcSize <= 128 KB */
- /* W, C, H, S, L, T, strat */
- {17, 12, 12, 1, 7, 8, ZSTD_fast}, /* level 0 - not used */
- {17, 12, 13, 1, 6, 8, ZSTD_fast}, /* level 1 */
- {17, 13, 16, 1, 5, 8, ZSTD_fast}, /* level 2 */
- {17, 16, 16, 2, 5, 8, ZSTD_dfast}, /* level 3 */
- {17, 13, 15, 3, 4, 8, ZSTD_greedy}, /* level 4 */
- {17, 15, 17, 4, 4, 8, ZSTD_greedy}, /* level 5 */
- {17, 16, 17, 3, 4, 8, ZSTD_lazy}, /* level 6 */
- {17, 15, 17, 4, 4, 8, ZSTD_lazy2}, /* level 7 */
- {17, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */
- {17, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */
- {17, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */
- {17, 17, 17, 7, 4, 8, ZSTD_lazy2}, /* level 11 */
- {17, 17, 17, 8, 4, 8, ZSTD_lazy2}, /* level 12 */
- {17, 18, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13.*/
- {17, 17, 17, 7, 3, 8, ZSTD_btopt}, /* level 14.*/
- {17, 17, 17, 7, 3, 16, ZSTD_btopt}, /* level 15.*/
- {17, 18, 17, 7, 3, 32, ZSTD_btopt}, /* level 16.*/
- {17, 18, 17, 7, 3, 64, ZSTD_btopt}, /* level 17.*/
- {17, 18, 17, 7, 3, 256, ZSTD_btopt}, /* level 18.*/
- {17, 18, 17, 8, 3, 256, ZSTD_btopt}, /* level 19.*/
- {17, 18, 17, 9, 3, 256, ZSTD_btopt2}, /* level 20.*/
- {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
- {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
- },
- {
- /* for srcSize <= 16 KB */
- /* W, C, H, S, L, T, strat */
- {14, 12, 12, 1, 7, 6, ZSTD_fast}, /* level 0 - not used */
- {14, 14, 14, 1, 6, 6, ZSTD_fast}, /* level 1 */
- {14, 14, 14, 1, 4, 6, ZSTD_fast}, /* level 2 */
- {14, 14, 14, 1, 4, 6, ZSTD_dfast}, /* level 3.*/
- {14, 14, 14, 4, 4, 6, ZSTD_greedy}, /* level 4.*/
- {14, 14, 14, 3, 4, 6, ZSTD_lazy}, /* level 5.*/
- {14, 14, 14, 4, 4, 6, ZSTD_lazy2}, /* level 6 */
- {14, 14, 14, 5, 4, 6, ZSTD_lazy2}, /* level 7 */
- {14, 14, 14, 6, 4, 6, ZSTD_lazy2}, /* level 8.*/
- {14, 15, 14, 6, 4, 6, ZSTD_btlazy2}, /* level 9.*/
- {14, 15, 14, 3, 3, 6, ZSTD_btopt}, /* level 10.*/
- {14, 15, 14, 6, 3, 8, ZSTD_btopt}, /* level 11.*/
- {14, 15, 14, 6, 3, 16, ZSTD_btopt}, /* level 12.*/
- {14, 15, 14, 6, 3, 24, ZSTD_btopt}, /* level 13.*/
- {14, 15, 15, 6, 3, 48, ZSTD_btopt}, /* level 14.*/
- {14, 15, 15, 6, 3, 64, ZSTD_btopt}, /* level 15.*/
- {14, 15, 15, 6, 3, 96, ZSTD_btopt}, /* level 16.*/
- {14, 15, 15, 6, 3, 128, ZSTD_btopt}, /* level 17.*/
- {14, 15, 15, 6, 3, 256, ZSTD_btopt}, /* level 18.*/
- {14, 15, 15, 7, 3, 256, ZSTD_btopt}, /* level 19.*/
- {14, 15, 15, 8, 3, 256, ZSTD_btopt2}, /* level 20.*/
- {14, 15, 15, 9, 3, 256, ZSTD_btopt2}, /* level 21.*/
- {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
- },
-};
-
-/*! ZSTD_getCParams() :
-* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
-* Size values are optional, provide 0 if not known or unused */
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
-{
- ZSTD_compressionParameters cp;
- size_t const addedSize = srcSize ? 0 : 500;
- U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
- U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
- if (compressionLevel <= 0)
- compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
- if (compressionLevel > ZSTD_MAX_CLEVEL)
- compressionLevel = ZSTD_MAX_CLEVEL;
- cp = ZSTD_defaultCParameters[tableID][compressionLevel];
- if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
- if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
- cp.windowLog = ZSTD_WINDOWLOG_MAX;
- if (cp.chainLog > ZSTD_CHAINLOG_MAX)
- cp.chainLog = ZSTD_CHAINLOG_MAX;
- if (cp.hashLog > ZSTD_HASHLOG_MAX)
- cp.hashLog = ZSTD_HASHLOG_MAX;
- }
- cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
- return cp;
-}
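Editor's note, not part of the patch: a worked example of the table selection above, with illustrative inputs.

    /* srcSize = 100 KB, dictSize = 0 :
     *   addedSize = 0                       (srcSize is known)
     *   rSize     = 100 KB
     *   tableID   = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB) = 1 + 1 + 0 = 2
     * so the "for srcSize <= 128 KB" row set is used; level 3 there is {17, 16, 16, 2, 5, 8, ZSTD_dfast},
     * before the 32-bit clamps and ZSTD_adjustCParams() are applied.
     */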
-
-/*! ZSTD_getParams() :
-* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
-* All fields of `ZSTD_frameParameters` are set to default (0) */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
-{
- ZSTD_parameters params;
- ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
- memset(&params, 0, sizeof(params));
- params.cParams = cParams;
- return params;
-}
-
-EXPORT_SYMBOL(ZSTD_maxCLevel);
-EXPORT_SYMBOL(ZSTD_compressBound);
-
-EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCCtx);
-EXPORT_SYMBOL(ZSTD_compressCCtx);
-EXPORT_SYMBOL(ZSTD_compress_usingDict);
-
-EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCDict);
-EXPORT_SYMBOL(ZSTD_compress_usingCDict);
-
-EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCStream);
-EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
-EXPORT_SYMBOL(ZSTD_resetCStream);
-EXPORT_SYMBOL(ZSTD_compressStream);
-EXPORT_SYMBOL(ZSTD_flushStream);
-EXPORT_SYMBOL(ZSTD_endStream);
-EXPORT_SYMBOL(ZSTD_CStreamInSize);
-EXPORT_SYMBOL(ZSTD_CStreamOutSize);
-
-EXPORT_SYMBOL(ZSTD_getCParams);
-EXPORT_SYMBOL(ZSTD_getParams);
-EXPORT_SYMBOL(ZSTD_checkCParams);
-EXPORT_SYMBOL(ZSTD_adjustCParams);
-
-EXPORT_SYMBOL(ZSTD_compressBegin);
-EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
-EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
-EXPORT_SYMBOL(ZSTD_copyCCtx);
-EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
-EXPORT_SYMBOL(ZSTD_compressContinue);
-EXPORT_SYMBOL(ZSTD_compressEnd);
-
-EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
-EXPORT_SYMBOL(ZSTD_compressBlock);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h
new file mode 100644
index 000000000000..d9a76112ec3a
--- /dev/null
+++ b/lib/zstd/compress/clevels.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CLEVELS_H
+#define ZSTD_CLEVELS_H
+
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
+#include <linux/zstd.h>
+
+/*-===== Pre-defined compression levels =====-*/
+
+#define ZSTD_MAX_CLEVEL 22
+
+__attribute__((__unused__))
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{ /* "default" - for any srcSize > 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 3, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6 */
+ { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 22, 23, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */
+ { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
+},
+{ /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, T, strat */
+ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, T, strat */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 16, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, T, strat */
+ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+};
+
+
+
+#endif /* ZSTD_CLEVELS_H */
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
new file mode 100644
index 000000000000..ec5b1ca6d71a
--- /dev/null
+++ b/lib/zstd/compress/fse_compress.c
@@ -0,0 +1,668 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy encoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/compiler.h"
+#include "../common/mem.h" /* U32, U16, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "hist.h" /* HIST_count_wksp */
+#include "../common/bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+  Designed to be included,
+  for type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle the worst case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize)
+{
+ U32 const tableSize = 1 << tableLog;
+ U32 const tableMask = tableSize - 1;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 const maxSV1 = maxSymbolValue+1;
+
+ U16* cumul = (U16*)workSpace; /* size = maxSV1 */
+ FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
+
+ U32 highThreshold = tableSize-1;
+
+ assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
+ if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
+ /* CTable header */
+ tableU16[-2] = (U16) tableLog;
+ tableU16[-1] = (U16) maxSymbolValue;
+ assert(tableLog < 16); /* required for threshold strategy to work */
+
+ /* For explanations on how to distribute symbol values over the table :
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ #ifdef __clang_analyzer__
+ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ #endif
+
+ /* symbol start positions */
+ { U32 u;
+ cumul[0] = 0;
+ for (u=1; u <= maxSV1; u++) {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ cumul[u] = cumul[u-1] + 1;
+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+ } else {
+ assert(normalizedCounter[u-1] >= 0);
+ cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+ assert(cumul[u] >= cumul[u-1]); /* no overflow */
+ } }
+ cumul[maxSV1] = (U16)(tableSize+1);
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ /* Case for no low prob count symbols. Lay down 8 bytes at a time
+ * to reduce branch misses since we are operating on a small block
+ */
+ BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+ { U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ assert(n>=0);
+ pos += (size_t)n;
+ }
+ }
+ /* Spread symbols across the table. Lack of lowprob symbols means that
+     * we don't need a variable-sized inner loop, so we can unroll the loop and
+ * reduce branch misses.
+ */
+ { size_t position = 0;
+ size_t s;
+ size_t const unroll = 2; /* Experimentally determined optimal unroll */
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableSymbol[uPosition] = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0); /* Must have initialized all positions */
+ }
+ } else {
+ U32 position = 0;
+ U32 symbol;
+ for (symbol=0; symbol<maxSV1; symbol++) {
+ int nbOccurrences;
+ int const freq = normalizedCounter[symbol];
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+ position = (position + step) & tableMask;
+ while (position > highThreshold)
+ position = (position + step) & tableMask; /* Low proba area */
+ } }
+ assert(position==0); /* Must have initialized all positions */
+ }
+
+ /* Build table */
+ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
+ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ {
+ case 0:
+ /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+ symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+ break;
+
+ case -1:
+ case 1:
+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+ assert(total <= INT_MAX);
+ symbolTT[s].deltaFindState = (int)(total - 1);
+ total ++;
+ break;
+ default :
+ assert(normalizedCounter[s] > 1);
+ { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
+ U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+ symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+ total += (unsigned)normalizedCounter[s];
+ } } } }
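+ /* Worked example : for a symbol with normalizedCounter[s]==5 and tableLog==6,
+  * maxBitsOut = 6 - BIT_highbit32(4) = 4 and minStatePlus = 5<<4 = 80, so at
+  * encode time (nbBits = (state + deltaNbBits) >> 16, see FSE_encodeSymbol())
+  * states in [64,80) emit 3 bits and states in [80,128) emit 4 bits. */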
+
+#if 0 /* debug : symbol costs */
+ DEBUGLOG(5, "\n --- table statistics : ");
+ { U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
+ symbol, normalizedCounter[symbol],
+ FSE_getMaxNbBits(symbolTT, symbol),
+ (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+ } }
+#endif
+
+ return 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-**************************************************************
+* FSE NCount encoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ + 4 /* bitCount initialized at 4 */
+ + 2 /* first two symbols may use one additional bit each */) / 8)
+ + 1 /* round up to whole nb bytes */
+ + 2 /* additional two bytes for bitstream flush */;
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
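+ /* e.g. maxSymbolValue==255, tableLog==6 :
+  * ((256*6 + 4 + 2) / 8) + 1 + 2 = 192 + 1 + 2 = 195 bytes worst case. */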
+
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ unsigned writeIsSafe)
+{
+ BYTE* const ostart = (BYTE*) header;
+ BYTE* out = ostart;
+ BYTE* const oend = ostart + headerBufferSize;
+ int nbBits;
+ const int tableSize = 1 << tableLog;
+ int remaining;
+ int threshold;
+ U32 bitStream = 0;
+ int bitCount = 0;
+ unsigned symbol = 0;
+ unsigned const alphabetSize = maxSymbolValue + 1;
+ int previousIs0 = 0;
+
+ /* Table Size */
+ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+ bitCount += 4;
+
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+ nbBits = tableLog+1;
+
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+ unsigned start = symbol;
+ while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+ if (symbol == alphabetSize) break; /* incorrect distribution */
+ while (symbol >= start+24) {
+ start+=24;
+ bitStream += 0xFFFFU << bitCount;
+ if ((!writeIsSafe) && (out > oend-2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE) bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+=2;
+ bitStream>>=16;
+ }
+ while (symbol >= start+3) {
+ start+=3;
+ bitStream += 3 << bitCount;
+ bitCount += 2;
+ }
+ bitStream += (symbol-start) << bitCount;
+ bitCount += 2;
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+ { int count = normalizedCounter[symbol++];
+ int const max = (2*threshold-1) - remaining;
+ remaining -= count < 0 ? -count : count;
+ count++; /* +1 for extra accuracy */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+ bitStream += count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
+ previousIs0 = (count==1);
+ if (remaining<1) return ERROR(GENERIC);
+ while (remaining<threshold) { nbBits--; threshold>>=1; }
+ }
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+
+ if (remaining != 1)
+ return ERROR(GENERIC); /* incorrect normalized distribution */
+ assert(symbol <= alphabetSize);
+
+ /* flush remaining bitStream */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+ return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
+
+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
+}
+
+
+/*-**************************************************************
+* FSE Compression Code
+****************************************************************/
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t size;
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+ return (FSE_CTable*)ZSTD_malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 tableLog = maxTableLog;
+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
+ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+ return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
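+ /* e.g. srcSize==1000, maxSymbolValue==255, maxTableLog==0 (use default) :
+  * maxBitsSrc = BIT_highbit32(999) - 2 = 7, minBits = min(10, 9) = 9,
+  * so the returned tableLog is 9 (already within [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG]). */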
+
+/* Secondary normalization method.
+ To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
+{
+ short const NOT_YET_ASSIGNED = -2;
+ U32 s;
+ U32 distributed = 0;
+ U32 ToDistribute;
+
+ /* Init */
+ U32 const lowThreshold = (U32)(total >> tableLog);
+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == 0) {
+ norm[s]=0;
+ continue;
+ }
+ if (count[s] <= lowThreshold) {
+ norm[s] = lowProbCount;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+ if (count[s] <= lowOne) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+
+ norm[s]=NOT_YET_ASSIGNED;
+ }
+ ToDistribute = (1 << tableLog) - distributed;
+
+ if (ToDistribute == 0)
+ return 0;
+
+ if ((total / ToDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = (U32)((total * 3) / (ToDistribute * 2));
+ for (s=0; s<=maxSymbolValue; s++) {
+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ } }
+ ToDistribute = (1 << tableLog) - distributed;
+ }
+
+ if (distributed == maxSymbolValue+1) {
+ /* all values are pretty poor;
+ probably incompressible data (should have already been detected);
+ find max, then give all remaining points to max */
+ U32 maxV = 0, maxC = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+ norm[maxV] += (short)ToDistribute;
+ return 0;
+ }
+
+ if (total == 0) {
+ /* all of the symbols were low enough for the lowOne or lowThreshold */
+ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+ if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+ return 0;
+ }
+
+ { U64 const vStepLog = 62 - tableLog;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
+ U64 tmpTotal = mid;
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (norm[s]==NOT_YET_ASSIGNED) {
+ U64 const end = tmpTotal + (count[s] * rStep);
+ U32 const sStart = (U32)(tmpTotal >> vStepLog);
+ U32 const sEnd = (U32)(end >> vStepLog);
+ U32 const weight = sEnd - sStart;
+ if (weight < 1)
+ return ERROR(GENERIC);
+ norm[s] = (short)weight;
+ tmpTotal = end;
+ } } }
+
+ return 0;
+}
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t total,
+ unsigned maxSymbolValue, unsigned useLowProbCount)
+{
+ /* Sanity checks */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ short const lowProbCount = useLowProbCount ? -1 : 1;
+ U64 const scale = 62 - tableLog;
+ U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
+ U64 const vStep = 1ULL<<(scale-20);
+ int stillToDistribute = 1<<tableLog;
+ unsigned s;
+ unsigned largest=0;
+ short largestP=0;
+ U32 lowThreshold = (U32)(total >> tableLog);
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == total) return 0; /* rle special case */
+ if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+ if (count[s] <= lowThreshold) {
+ normalizedCounter[s] = lowProbCount;
+ stillToDistribute--;
+ } else {
+ short proba = (short)((count[s]*step) >> scale);
+ if (proba<8) {
+ U64 restToBeat = vStep * rtbTable[proba];
+ proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+ }
+ if (proba > largestP) { largestP=proba; largest=s; }
+ normalizedCounter[s] = proba;
+ stillToDistribute -= proba;
+ } }
+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+ /* corner case, need another normalization method */
+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
+ if (FSE_isError(errorCode)) return errorCode;
+ }
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
+#if 0
+ { /* Print Table (debug) */
+ U32 s;
+ U32 nTotal = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
+ for (s=0; s<=maxSymbolValue; s++)
+ nTotal += abs(normalizedCounter[s]);
+ if (nTotal != (1U<<tableLog))
+ RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+ getchar();
+ }
+#endif
+
+ return tableLog;
+}
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* header */
+ tableU16[-2] = (U16) nbBits;
+ tableU16[-1] = (U16) maxSymbolValue;
+
+ /* Build table */
+ for (s=0; s<tableSize; s++)
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ } }
+
+ return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+ void* ptr = ct;
+ U16* tableU16 = ( (U16*) ptr) + 2;
+ void* FSCTptr = (U32*)ptr + 2;
+ FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+ /* header */
+ tableU16[-2] = (U16) 0;
+ tableU16[-1] = (U16) symbolValue;
+
+ /* Build table */
+ tableU16[0] = 0;
+ tableU16[1] = 0; /* just in case */
+
+ /* Build Symbol Transformation Table */
+ symbolTT[symbolValue].deltaNbBits = 0;
+ symbolTT[symbolValue].deltaFindState = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct, const unsigned fast)
+{
+ const BYTE* const istart = (const BYTE*) src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip=iend;
+
+ BIT_CStream_t bitC;
+ FSE_CState_t CState1, CState2;
+
+ /* init */
+ if (srcSize <= 2) return 0;
+ { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+ if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+ if (srcSize & 1) {
+ FSE_initCState2(&CState1, ct, *--ip);
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ } else {
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_initCState2(&CState1, ct, *--ip);
+ }
+
+ /* join to mod 4 */
+ srcSize -= 2;
+ if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ /* 2 or 4 encoding per loop */
+ while ( ip>istart ) {
+
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
+ FSE_FLUSHBITS(&bitC);
+
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ }
+
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ FSE_flushCState(&bitC, &CState2);
+ FSE_flushCState(&bitC, &CState1);
+ return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct)
+{
+ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+ if (fast)
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+ else
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
new file mode 100644
index 000000000000..3ddc6dfb6894
--- /dev/null
+++ b/lib/zstd/compress/hist.c
@@ -0,0 +1,165 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/mem.h" /* U32, BYTE, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "../common/error_private.h" /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ * Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned largestCount=0;
+
+ ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+ while (ip<end) {
+ assert(*ip <= maxSymbolValue);
+ count[*ip++]++;
+ }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+
+ { U32 s;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > largestCount) largestCount = count[s];
+ }
+
+ return largestCount;
+}
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
+static size_t HIST_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ HIST_checkInput_e check,
+ U32* const workSpace)
+{
+ const BYTE* ip = (const BYTE*)source;
+ const BYTE* const iend = ip+sourceSize;
+ size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
+ unsigned max=0;
+ U32* const Counting1 = workSpace;
+ U32* const Counting2 = Counting1 + 256;
+ U32* const Counting3 = Counting2 + 256;
+ U32* const Counting4 = Counting3 + 256;
+
+ /* safety checks */
+ assert(*maxSymbolValuePtr <= 255);
+ if (!sourceSize) {
+ ZSTD_memset(count, 0, countSize);
+ *maxSymbolValuePtr = 0;
+ return 0;
+ }
+ ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+ /* by stripes of 16 bytes */
+ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ }
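+ /* the loop above always reads one 32-bit word ahead into 'cached' ;
+  * step ip back so the byte-wise tail loop below also counts that word. */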
+ ip-=4;
+ }
+
+ /* finish last symbols */
+ while (ip<iend) Counting1[*ip++]++;
+
+ { U32 s;
+ for (s=0; s<256; s++) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s] > max) max = Counting1[s];
+ } }
+
+ { unsigned maxSymbolValue = 255;
+ while (!Counting1[maxSymbolValue]) maxSymbolValue--;
+ if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = maxSymbolValue;
+ ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
+ }
+ return (size_t)max;
+}
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if (sourceSize < 1500) /* heuristic threshold */
+ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ if (*maxSymbolValuePtr < 255)
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+ *maxSymbolValuePtr = 255;
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
+
diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
new file mode 100644
index 000000000000..fc1830abc9c6
--- /dev/null
+++ b/lib/zstd/compress/hist.h
@@ -0,0 +1,75 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/zstd_deps.h" /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ * Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ * Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified).
+ * or an error code, which can be tested using HIST_isError().
+ * note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code); /*< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * The benefit is that this function uses very little stack space.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
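+/* Typical use (sketch, caller-supplied src/srcSize) :
+ *   unsigned wksp[HIST_WKSP_SIZE_U32];
+ *   unsigned count[256];
+ *   unsigned maxSym = 255;
+ *   size_t const largest = HIST_count_wksp(count, &maxSym, src, srcSize, wksp, sizeof(wksp));
+ *   if (HIST_isError(largest)) { ... }
+ */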
+
+/* HIST_countFast() :
+ * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ * Like HIST_countFast(), this function is unsafe,
+ * and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ * It is also a bit slower for large inputs.
+ * However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ * Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
new file mode 100644
index 000000000000..74ef0db47621
--- /dev/null
+++ b/lib/zstd/compress/huf_compress.c
@@ -0,0 +1,1335 @@
+/* ******************************************************************
+ * Huffman encoder, part of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h"
+#include "hist.h"
+#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
+#include "../common/fse.h" /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+* HUF : Huffman block compression
+*********************************************************/
+#define HUF_WORKSPACE_MAX_ALIGNMENT 8
+
+static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
+{
+ size_t const mask = align - 1;
+ size_t const rem = (size_t)workspace & mask;
+ size_t const add = (align - rem) & mask;
+ BYTE* const aligned = (BYTE*)workspace + add;
+ assert((align & (align - 1)) == 0); /* pow 2 */
+ assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
+ if (*workspaceSizePtr >= add) {
+ assert(add < align);
+ assert(((size_t)aligned & mask) == 0);
+ *workspaceSizePtr -= add;
+ return aligned;
+ } else {
+ *workspaceSizePtr = 0;
+ return NULL;
+ }
+}
+
+
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+
+typedef struct {
+ FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+ U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
+ unsigned count[HUF_TABLELOG_MAX+1];
+ S16 norm[HUF_TABLELOG_MAX+1];
+} HUF_CompressWeightsWksp;
+
+static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
+ U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+ HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+
+ if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
+
+ /* init conditions */
+ if (wtSize <= 1) return 0; /* Not compressible */
+
+ /* Scan input and build symbol stats */
+ { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
+ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ }
+
+ tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+ CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
+ op += hSize;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
+
+static size_t HUF_getNbBits(HUF_CElt elt)
+{
+ return elt & 0xFF;
+}
+
+static size_t HUF_getNbBitsFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static size_t HUF_getValue(HUF_CElt elt)
+{
+ return elt & ~0xFF;
+}
+
+static size_t HUF_getValueFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
+{
+ assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
+ *elt = nbBits;
+}
+
+static void HUF_setValue(HUF_CElt* elt, size_t value)
+{
+ size_t const nbBits = HUF_getNbBits(*elt);
+ if (nbBits > 0) {
+ assert((value >> nbBits) == 0);
+ *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
+ }
+}
+
+typedef struct {
+ HUF_CompressWeightsWksp wksp;
+ BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+} HUF_WriteCTableWksp;
+
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
+ void* workspace, size_t workspaceSize)
+{
+ HUF_CElt const* const ct = CTable + 1;
+ BYTE* op = (BYTE*)dst;
+ U32 n;
+ HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+
+ /* check conditions */
+ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+ /* convert to weight */
+ wksp->bitsToWeight[0] = 0;
+ for (n=1; n<huffLog+1; n++)
+ wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+ for (n=0; n<maxSymbolValue; n++)
+ wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
+
+ /* attempt weights compression by FSE */
+ if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
+ if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
+ op[0] = (BYTE)hSize;
+ return hSize+1;
+ } }
+
+ /* write raw values as 4-bits (max : 15) */
+ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
+ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+ wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+ for (n=0; n<maxSymbolValue; n+=2)
+ op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
+ return ((maxSymbolValue+1)/2) + 1;
+}
+
+/*! HUF_writeCTable() :
+ `CTable` : Huffman tree to save, using huf representation.
+ @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
+{
+ HUF_WriteCTableWksp wksp;
+ return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
+}
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
+{
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ HUF_CElt* const ct = CTable + 1;
+
+ /* get symbol weights */
+ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+ *hasZeroWeights = (rankVal[0] > 0);
+
+ /* check result */
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+ CTable[0] = tableLog;
+
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
+ U32 curr = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = curr;
+ } }
+
+ /* fill nbBits */
+ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
+ } }
+
+ /* fill val */
+ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
+ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+ { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
+ /* determine starting value per rank */
+ valPerRank[tableLog+1] = 0; /* for w==0 */
+ { U16 min = 0;
+ U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ /* assign value within rank, symbol order */
+ { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
+ }
+
+ *maxSymbolValuePtr = nbSymbols - 1;
+ return readSize;
+}
+
+U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
+{
+ const HUF_CElt* ct = CTable + 1;
+ assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ return (U32)HUF_getNbBits(ct[symbolValue]);
+}
+
+
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+/*
+ * HUF_setMaxHeight():
+ * Enforces maxNbBits on the Huffman tree described in huffNode.
+ *
+ * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
+ * the tree so that it is a valid canonical Huffman tree.
+ *
+ * @pre The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits == huffNode[lastNonNull].nbBits.
+ * @post The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits is the return value <= maxNbBits.
+ *
+ * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param lastNonNull The symbol with the lowest count in the Huffman tree.
+ * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * may not respect. After this function the Huffman tree will
+ * respect maxNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment,
+ * necessarily no more than maxNbBits.
+ */
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+ const U32 largestBits = huffNode[lastNonNull].nbBits;
+ /* early exit : no elt > maxNbBits, so the tree is already valid. */
+ if (largestBits <= maxNbBits) return largestBits;
+
+ /* there are several too large elements (at least >= 2) */
+ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
+ int n = (int)lastNonNull;
+
+ /* Adjust any ranks > maxNbBits to maxNbBits.
+ * Compute totalCost, which is how far over 2^largestBits the sum of
+ * the ranks is after adjusting the offending ranks.
+ */
+ while (huffNode[n].nbBits > maxNbBits) {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+ huffNode[n].nbBits = (BYTE)maxNbBits;
+ n--;
+ }
+ /* n stops at huffNode[n].nbBits <= maxNbBits */
+ assert(huffNode[n].nbBits <= maxNbBits);
+ /* n ends at the index of the smallest symbol using < maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) --n;
+
+ /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ * note : totalCost is necessarily a multiple of baseCost */
+ assert((totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - maxNbBits);
+ assert(totalCost > 0);
+
+ /* repay normalized cost */
+ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+
+ /* Get pos of last (smallest = lowest cum. count) symbol per rank */
+ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
+ { U32 currentNbBits = maxNbBits;
+ int pos;
+ for (pos=n ; pos >= 0; pos--) {
+ if (huffNode[pos].nbBits >= currentNbBits) continue;
+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+ rankLast[maxNbBits-currentNbBits] = (U32)pos;
+ } }
+
+ while (totalCost > 0) {
+ /* Try to reduce the next power of 2 above totalCost because we
+ * gain back half the rank.
+ */
+ U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
+ for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+ U32 const highPos = rankLast[nBitsToDecrease];
+ U32 const lowPos = rankLast[nBitsToDecrease-1];
+ if (highPos == noSymbol) continue;
+ /* Decrease highPos if no symbols of lowPos or if it is
+ * not cheaper to remove 2 lowPos than highPos.
+ */
+ if (lowPos == noSymbol) break;
+ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+ /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
+ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
+ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
+ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+ nBitsToDecrease++;
+ assert(rankLast[nBitsToDecrease] != noSymbol);
+ /* Increase the number of bits to gain back half the rank cost. */
+ totalCost -= 1 << (nBitsToDecrease-1);
+ huffNode[rankLast[nBitsToDecrease]].nbBits++;
+
+ /* Fix up the new rank.
+ * If the new rank was empty, this symbol is now its smallest.
+ * Otherwise, this symbol will be the largest in the new rank so no adjustment.
+ */
+ if (rankLast[nBitsToDecrease-1] == noSymbol)
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
+ /* Fix up the old rank.
+ * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
+ * it must be the only symbol in its rank, so the old rank now has no symbols.
+ * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
+ * the smallest node in the rank. If the previous position belongs to a different rank,
+ * then the rank is now empty.
+ */
+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol;
+ else {
+ rankLast[nBitsToDecrease]--;
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+ }
+ } /* while (totalCost > 0) */
+
+ /* If we've removed too much weight, then we have to add it back.
+ * To avoid overshooting again, we only adjust the smallest rank.
+ * We take the largest nodes from the lowest rank 0 and move them
+ * to rank 1. There are guaranteed to be enough rank 0 symbols because
+ * TODO.
+ */
+ while (totalCost < 0) { /* Sometimes, cost correction overshoot */
+ /* special case : no rank 1 symbol (using maxNbBits-1);
+ * let's create one from largest rank 0 (using maxNbBits).
+ */
+ if (rankLast[1] == noSymbol) {
+ while (huffNode[n].nbBits == maxNbBits) n--;
+ huffNode[n+1].nbBits--;
+ assert(n >= 0);
+ rankLast[1] = (U32)(n+1);
+ totalCost++;
+ continue;
+ }
+ huffNode[ rankLast[1] + 1 ].nbBits--;
+ rankLast[1]++;
+ totalCost ++;
+ }
+ } /* repay normalized cost */
+ } /* there are several too large elements (at least >= 2) */
+
+ return maxNbBits;
+}
+
+typedef struct {
+ U16 base;
+ U16 curr;
+} rankPos;
+
+typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+
+/* Number of buckets available for HUF_sort() */
+#define RANK_POSITION_TABLE_SIZE 192
+
+typedef struct {
+ huffNodeTable huffNodeTbl;
+ rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
+} HUF_buildCTable_wksp_tables;
+
+/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
+ * Strategy is to use as many buckets as possible for representing distinct
+ * counts while using the remainder to represent all "large" counts.
+ *
+ * To satisfy this requirement for 192 buckets, we can do the following:
+ * Let buckets 0-166 represent distinct counts of [0, 166]
+ * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
+ */
+#define RANK_POSITION_MAX_COUNT_LOG 32
+#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
+#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */
+
+/* Return the appropriate bucket index for a given count. See definition of
+ * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
+ */
+static U32 HUF_getIndex(U32 const count) {
+ return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
+ ? count
+ : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
+}
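+/* e.g. HUF_getIndex(100) == 100 (one bucket per distinct count), while
+ * HUF_getIndex(1000) == BIT_highbit32(1000) + RANK_POSITION_LOG_BUCKETS_BEGIN
+ *                    == 9 + 158 == 167 (log2 bucket). */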
+
+/* Helper swap function for HUF_quickSortPartition() */
+static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
+ nodeElt tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+/* Returns 0 if the huffNode array is not sorted by descending count */
+MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
+ U32 i;
+ for (i = 1; i < maxSymbolValue1; ++i) {
+ if (huffNode[i].count > huffNode[i-1].count) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Insertion sort by descending order */
+HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
+ int i;
+ int const size = high-low+1;
+ huffNode += low;
+ for (i = 1; i < size; ++i) {
+ nodeElt const key = huffNode[i];
+ int j = i - 1;
+ while (j >= 0 && huffNode[j].count < key.count) {
+ huffNode[j + 1] = huffNode[j];
+ j--;
+ }
+ huffNode[j + 1] = key;
+ }
+}
+
+/* Pivot helper function for quicksort. */
+static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
+ /* Simply select rightmost element as pivot. "Better" selectors like
+ * median-of-three don't experimentally appear to have any benefit.
+ */
+ U32 const pivot = arr[high].count;
+ int i = low - 1;
+ int j = low;
+ for ( ; j < high; j++) {
+ if (arr[j].count > pivot) {
+ i++;
+ HUF_swapNodes(&arr[i], &arr[j]);
+ }
+ }
+ HUF_swapNodes(&arr[i + 1], &arr[high]);
+ return i + 1;
+}
+
+/* Classic quicksort by descending with partially iterative calls
+ * to reduce worst case callstack size.
+ */
+static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
+ int const kInsertionSortThreshold = 8;
+ if (high - low < kInsertionSortThreshold) {
+ HUF_insertionSort(arr, low, high);
+ return;
+ }
+ while (low < high) {
+ int const idx = HUF_quickSortPartition(arr, low, high);
+ if (idx - low < high - idx) {
+ HUF_simpleQuickSort(arr, low, idx - 1);
+ low = idx + 1;
+ } else {
+ HUF_simpleQuickSort(arr, idx + 1, high);
+ high = idx - 1;
+ }
+ }
+}
+
+/*
+ * HUF_sort():
+ * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
+ *
+ * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
+ * Must have (maxSymbolValue + 1) entries.
+ * @param[in] count Histogram of the symbols.
+ * @param[in] maxSymbolValue Maximum symbol value.
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
+ */
+static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
+ U32 n;
+ U32 const maxSymbolValue1 = maxSymbolValue+1;
+
+ /* Compute base and set curr to base.
+ * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
+ * See HUF_getIndex to see bucketing strategy.
+ * We attribute each symbol to lowerRank's base value, because we want to know where
+ * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
+ */
+ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 lowerRank = HUF_getIndex(count[n]);
+ assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
+ rankPosition[lowerRank].base++;
+ }
+
+ assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ /* Set up the rankPosition table */
+ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
+ rankPosition[n-1].base += rankPosition[n].base;
+ rankPosition[n-1].curr = rankPosition[n-1].base;
+ }
+
+ /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 const c = count[n];
+ U32 const r = HUF_getIndex(c) + 1;
+ U32 const pos = rankPosition[r].curr++;
+ assert(pos < maxSymbolValue1);
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ }
+
+ /* Sort each bucket. */
+ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
+ U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base;
+ U32 const bucketStartIdx = rankPosition[n].base;
+ if (bucketSize > 1) {
+ assert(bucketStartIdx < maxSymbolValue1);
+ HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
+ }
+ }
+
+ assert(HUF_isSorted(huffNode, maxSymbolValue1));
+}
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+
+/* HUF_buildTree():
+ * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
+ *
+ * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
+ * @param maxSymbolValue The maximum symbol value.
+ * @return The smallest node in the Huffman tree (by count).
+ */
+static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
+{
+ nodeElt* const huffNode0 = huffNode - 1;
+ int nonNullRank;
+ int lowS, lowN;
+ int nodeNb = STARTNODE;
+ int n, nodeRoot;
+ /* init for parents */
+ nonNullRank = (int)maxSymbolValue;
+ while(huffNode[nonNullRank].count == 0) nonNullRank--;
+ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+ huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
+ nodeNb++; lowS-=2;
+ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+ huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
+
+ /* create parents */
+ while (nodeNb <= nodeRoot) {
+ int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+ huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
+ nodeNb++;
+ }
+
+ /* distribute weights (unlimited tree height) */
+ huffNode[nodeRoot].nbBits = 0;
+ for (n=nodeRoot-1; n>=STARTNODE; n--)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ for (n=0; n<=nonNullRank; n++)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+ return nonNullRank;
+}
+
+/*
+ * HUF_buildCTableFromTree():
+ * Build the CTable given the Huffman tree in huffNode.
+ *
+ * @param[out] CTable The output Huffman CTable.
+ * @param huffNode The Huffman tree.
+ * @param nonNullRank The last and smallest node in the Huffman tree.
+ * @param maxSymbolValue The maximum symbol value.
+ * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
+ */
+static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
+{
+ HUF_CElt* const ct = CTable + 1;
+ /* fill result into ctable (val, nbBits) */
+ int n;
+ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ int const alphabetSize = (int)(maxSymbolValue + 1);
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=(int)maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ for (n=0; n<alphabetSize; n++)
+ HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
+ CTable[0] = maxNbBits;
+}
+
+size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+ HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
+ nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
+ nodeElt* const huffNode = huffNode0+1;
+ int nonNullRank;
+
+ /* safety checks */
+ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
+ return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+ return ERROR(maxSymbolValue_tooLarge);
+ ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+
+ /* build tree */
+ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
+
+ /* enforce maxTableLog */
+ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
+
+ HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
+
+ return maxNbBits;
+}
+
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
+{
+ HUF_CElt const* ct = CTable + 1;
+ size_t nbBits = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ nbBits += HUF_getNbBits(ct[s]) * count[s];
+ }
+ return nbBits >> 3;
+}
+
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+ HUF_CElt const* ct = CTable + 1;
+ int bad = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
+ }
+ return !bad;
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+/* HUF_CStream_t:
+ * Huffman uses its own BIT_CStream_t implementation.
+ * There are three major differences from BIT_CStream_t:
+ * 1. HUF_addBits() takes a HUF_CElt (size_t) which is
+ * the pair (nbBits, value) in the format:
+ * format:
+ * - Bits [0, 4) = nbBits
+ * - Bits [4, 64 - nbBits) = 0
+ * - Bits [64 - nbBits, 64) = value
+ * 2. The bitContainer is built from the upper bits and
+ * right shifted. E.g. to add a new value of N bits
+ * you right shift the bitContainer by N, then or in
+ * the new value into the N upper bits.
+ * 3. The bitstream has two bit containers. You can add
+ * bits to the second container and merge them into
+ * the first container.
+ */
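+/* e.g. on a 64-bit target, a symbol coded in 3 bits with code value 0b101 is
+ * stored as elt = (0x5ULL << 61) | 3 : nbBits in the low bits, the code
+ * left-aligned at the top (see HUF_setNbBits()/HUF_setValue() above). */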
+
+#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
+
+typedef struct {
+ size_t bitContainer[2];
+ size_t bitPos[2];
+
+ BYTE* startPtr;
+ BYTE* ptr;
+ BYTE* endPtr;
+} HUF_CStream_t;
+
+/*! HUF_initCStream():
+ * Initializes the bitstream.
+ * @returns 0 or an error code.
+ */
+static size_t HUF_initCStream(HUF_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ ZSTD_memset(bitC, 0, sizeof(*bitC));
+ bitC->startPtr = (BYTE*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
+ if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! HUF_addBits():
+ * Adds the symbol stored in HUF_CElt elt to the bitstream.
+ *
+ * @param elt The element we're adding. This is a (nbBits, value) pair.
+ * See the HUF_CStream_t docs for the format.
+ * @param idx Insert into the bitstream at this idx.
+ * @param kFast This is a template parameter. If the bitstream is guaranteed
+ * to have at least 4 unused bits after this call it may be 1,
+ * otherwise it must be 0. HUF_addBits() is faster when fast is set.
+ */
+FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
+{
+ assert(idx <= 1);
+ assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
+ /* This is efficient on x86-64 with BMI2 because shrx
+ * only reads the low 6 bits of the register. The compiler
+ * knows this and elides the mask. When fast is set,
+ * every operation can use the same value loaded from elt.
+ */
+ bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
+ bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
+ /* We only read the low 8 bits of bitC->bitPos[idx] so it
+ * doesn't matter that the high bits have noise from the value.
+ */
+ bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
+ assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ /* The last 4-bits of elt are dirty if fast is set,
+ * so we must not be overwriting bits that have already been
+ * inserted into the bit container.
+ */
+#if DEBUGLEVEL >= 1
+ {
+ size_t const nbBits = HUF_getNbBits(elt);
+ size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
+ (void)dirtyBits;
+ /* Middle bits are 0. */
+ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
+ /* We didn't overwrite any bits in the bit container. */
+ assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ (void)dirtyBits;
+ }
+#endif
+}
+
+FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
+{
+ bitC->bitContainer[1] = 0;
+ bitC->bitPos[1] = 0;
+}
+
+/*! HUF_mergeIndex1() :
+ * Merges the bit container @ index 1 into the bit container @ index 0
+ * and zeros the bit container @ index 1.
+ */
+FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
+{
+ assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
+ bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
+ bitC->bitContainer[0] |= bitC->bitContainer[1];
+ bitC->bitPos[0] += bitC->bitPos[1];
+ assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+}
+
+/*! HUF_flushBits() :
+* Flushes the bits in the bit container @ index 0.
+*
+* @post bitPos will be < 8.
+* @param kFast If kFast is set then we must know a-priori that
+* the bit container will not overflow.
+*/
+FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
+{
+ /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ size_t const nbBytes = nbBits >> 3;
+ /* The top nbBits bits of bitContainer are the ones we need. */
+ size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
+ /* Mask bitPos to account for the bytes we consumed. */
+ bitC->bitPos[0] &= 7;
+ assert(nbBits > 0);
+ assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitContainer);
+ bitC->ptr += nbBytes;
+ assert(!kFast || bitC->ptr <= bitC->endPtr);
+ if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ /* bitContainer doesn't need to be modified because the leftover
+ * bits are already the top bitPos bits. And we don't care about
+ * noise in the lower values.
+ */
+}
+
+/*! HUF_endMark()
+ * @returns The Huffman stream end mark: A 1-bit value = 1.
+ */
+static HUF_CElt HUF_endMark(void)
+{
+ HUF_CElt endMark;
+ HUF_setNbBits(&endMark, 1);
+ HUF_setValue(&endMark, 1);
+ return endMark;
+}
+
+/*! HUF_closeCStream() :
+ * @return Size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+static size_t HUF_closeCStream(HUF_CStream_t* bitC)
+{
+ HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
+ HUF_flushBits(bitC, /* kFast */ 0);
+ {
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
+ }
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
+{
+ HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
+ const BYTE* ip, size_t srcSize,
+ const HUF_CElt* ct,
+ int kUnroll, int kFastFlush, int kLastFast)
+{
+ /* Join to kUnroll */
+ int n = (int)srcSize;
+ int rem = n % kUnroll;
+ if (rem > 0) {
+ for (; rem > 0; --rem) {
+ HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
+ }
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n % kUnroll == 0);
+
+ /* Join to 2 * kUnroll */
+ if (n % (2 * kUnroll)) {
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ n -= kUnroll;
+ }
+ assert(n % (2 * kUnroll) == 0);
+
+ for (; n > 0; n -= 2 * kUnroll) {
+ /* Encode kUnroll symbols into the bitstream @ index 0. */
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ /* Encode kUnroll symbols into the bitstream @ index 1.
+ * This allows us to start filling the bit container
+ * without any data dependencies.
+ */
+ HUF_zeroIndex1(bitC);
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
+ /* Merge bitstream @ index 1 into the bitstream @ index 0 */
+ HUF_mergeIndex1(bitC);
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n == 0);
+
+}
+
+/*
+ * Returns a tight upper bound on the output space needed by Huffman,
+ * plus an 8-byte buffer to handle over-writes. If the output is at least
+ * this large, we don't need to do bounds checks during Huffman encoding.
+ */
+static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
+{
+ return ((srcSize * tableLog) >> 3) + 8;
+}
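+
+/* Example: for srcSize = 1000 and tableLog = 11 the bound is
+ * ((1000 * 11) >> 3) + 8 = 1375 + 8 = 1383 bytes; any destination at least
+ * this large allows the unchecked (kFastFlush = 1) encoding paths below.
+ */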
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ U32 const tableLog = (U32)CTable[0];
+ HUF_CElt const* ct = CTable + 1;
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+ HUF_CStream_t bitC;
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
+ { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
+ if (HUF_isError(initErr)) return 0; }
+
+ if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFastFlush */ 0, /* kLastFast */ 0);
+ else {
+ if (MEM_32bits()) {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10: ZSTD_FALLTHROUGH;
+ case 9: ZSTD_FALLTHROUGH;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 7: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ } else {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 9:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 7:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 6: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ }
+ }
+ assert(bitC.ptr <= bitC.endPtr);
+
+ return HUF_closeCStream(&bitC);
+}
+
+#if DYNAMIC_BMI2
+
+static BMI2_TARGET_ATTRIBUTE size_t
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ if (bmi2) {
+ return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+ }
+ return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ (void)bmi2;
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+}
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, int bmi2)
+{
+ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
+ if (srcSize < 12) return 0; /* no saving possible : too small input */
+ op += 6; /* jumpTable */
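+ /* Layout note: the 6 bytes reserved here form the jump table, i.e. three
+ * little-endian 16-bit values holding the compressed sizes of segments 1-3
+ * (written below with MEM_writeLE16); the size of the 4th segment is implied
+ * by the total compressed size.
+ */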
+
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ assert(ip <= iend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+}
+
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
+
+static size_t HUF_compressCTable_internal(
+ BYTE* const ostart, BYTE* op, BYTE* const oend,
+ const void* src, size_t srcSize,
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+{
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
+ HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+ if (HUF_isError(cSize)) { return cSize; }
+ if (cSize==0) { return 0; } /* uncompressible */
+ op += cSize;
+ /* check compressibility */
+ assert(op >= ostart);
+ if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+ return (size_t)(op-ostart);
+}
+
+typedef struct {
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
+ union {
+ HUF_buildCTable_wksp_tables buildCTable_wksp;
+ HUF_WriteCTableWksp writeCTable_wksp;
+ U32 hist_wksp[HIST_WKSP_SIZE_U32];
+ } wksps;
+} HUF_compress_tables_t;
+
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
+
+/* HUF_compress_internal() :
+ * `workSpace` must be aligned on 4-byte boundaries,
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ HUF_nbStreams_e nbStreams,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+ const int bmi2, unsigned suspectUncompressible)
+{
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
+
+ /* checks & inits */
+ if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
+ if (!srcSize) return 0; /* Uncompressed */
+ if (!dstSize) return 0; /* cannot fit anything within dst budget */
+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+ /* Heuristic : If old table is valid, use it for small inputs */
+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* If uncompressible data is suspected, do a smaller sampling first */
+ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
+ if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+ size_t largestTotal = 0;
+ { unsigned maxSymbolValueBegin = maxSymbolValue;
+ CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestBegin;
+ }
+ { unsigned maxSymbolValueEnd = maxSymbolValue;
+ CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestEnd;
+ }
+ if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
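+ /* Worked through, the threshold above is ((2 * 4096) >> 7) + 4 = 68 : if the
+ * most frequent symbols of the two 4096-byte samples together account for at
+ * most 68 of the 8192 sampled bytes (< 1%), the input is judged unlikely to
+ * compress and Huffman compression is skipped (return 0).
+ */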
+
+ /* Scan input and build symbol stats */
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
+ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
+ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
+
+ /* Check validity of previous table */
+ if ( repeat
+ && *repeat == HUF_repeat_check
+ && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+ *repeat = HUF_repeat_none;
+ }
+ /* Heuristic : use existing table for small inputs */
+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
+ CHECK_F(maxBits);
+ huffLog = (U32)maxBits;
+ }
+ /* Zero unused symbols in CTable, so we can check it for validity */
+ {
+ size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
+ size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
+ ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
+ }
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
+ &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
+ /* Check if using previous huffman table is beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+ size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ } }
+
+ /* Use the new huffman table */
+ if (hSize + 12ul >= srcSize) { return 0; }
+ op += hSize;
+ if (repeat) { *repeat = HUF_repeat_none; }
+ if (oldHufTable)
+ ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ }
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, table->CTable, bmi2);
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/, 0);
+}
+
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
+ int bmi2, unsigned suspectUncompressible)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize, hufTable,
+ repeat, preferRepeat, bmi2, suspectUncompressible);
+}
+
+/* HUF_compress4X_wksp():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/, 0);
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * may quickly skip compression when the input looks incompressible.
+ * re-uses an existing huffman compression table when beneficial */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
+}
+
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
new file mode 100644
index 000000000000..f620cafca633
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress.c
@@ -0,0 +1,6127 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
+#include "../common/mem.h"
+#include "hist.h" /* HIST_countFast_wksp */
+#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_compress_internal.h"
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
+#include "zstd_compress_superblock.h"
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * COMPRESS_HEAPMODE :
+ * Select how the default compression function ZSTD_compress() allocates its context,
+ * on stack (0, default), or into heap (1).
+ * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
+ */
+
+/*!
+ * ZSTD_HASHLOG3_MAX :
+ * Maximum size of the hash table dedicated to finding 3-byte matches,
+ * in log format, aka 17 => 1 << 17 == 128Ki positions.
+ * This structure is only used in zstd_opt.
+ * Since allocation is centralized for all strategies, it has to be known here.
+ * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
+ * so that zstd_opt.c doesn't need to know about this constant.
+ */
+#ifndef ZSTD_HASHLOG3_MAX
+# define ZSTD_HASHLOG3_MAX 17
+#endif
+
+/*-*************************************
+* Helper functions
+***************************************/
+/* ZSTD_compressBound()
+ * Note that the result from this function is only compatible with the "normal"
+ * full-block strategy.
+ * When there are a lot of small blocks due to frequent flushes in streaming mode,
+ * the overhead of headers can make the compressed data larger than the
+ * return value of ZSTD_compressBound().
+ */
+size_t ZSTD_compressBound(size_t srcSize) {
+ return ZSTD_COMPRESSBOUND(srcSize);
+}
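+
+/* Rough illustration, assuming the usual ZSTD_COMPRESSBOUND() definition from
+ * zstd.h (srcSize + srcSize/256 + a small-input margin of at most 64 bytes):
+ * for srcSize = 100000, the bound is about 100000 + 390 + 15 = 100405 bytes.
+ */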
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+struct ZSTD_CDict_s {
+ const void* dictContent;
+ size_t dictContentSize;
+ ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
+ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+ ZSTD_cwksp workspace;
+ ZSTD_matchState_t matchState;
+ ZSTD_compressedBlockState_t cBlockState;
+ ZSTD_customMem customMem;
+ U32 dictID;
+ int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+ ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ * row-based matchfinder. Unless the cdict is reloaded, we will use
+ * the same greedy/lazy matchfinder at compression time.
+ */
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+ return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+ assert(cctx != NULL);
+ ZSTD_memset(cctx, 0, sizeof(*cctx));
+ cctx->customMem = memManager;
+ cctx->bmi2 = ZSTD_cpuSupportsBmi2();
+ { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+ ZSTD_STATIC_ASSERT(zcss_init==0);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
+ if (!cctx) return NULL;
+ ZSTD_initCCtx(cctx, customMem);
+ return cctx;
+ }
+}
+
+ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
+{
+ ZSTD_cwksp ws;
+ ZSTD_CCtx* cctx;
+ if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
+ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+
+ cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
+ if (cctx == NULL) return NULL;
+
+ ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
+ ZSTD_cwksp_move(&cctx->workspace, &ws);
+ cctx->staticSize = workspaceSize;
+
+ /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ return cctx;
+}
+
+/*
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+ ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+}
+
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+{
+ assert(cctx != NULL);
+ assert(cctx->staticSize == 0);
+ ZSTD_clearAllDicts(cctx);
+ ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
+ {
+ int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ ZSTD_freeCCtxContent(cctx);
+ if (!cctxInWorkspace) {
+ ZSTD_customFree(cctx, cctx->customMem);
+ }
+ }
+ return 0;
+}
+
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support sizeof on NULL */
+ /* cctx may be in the workspace */
+ return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ + ZSTD_cwksp_sizeof(&cctx->workspace)
+ + ZSTD_sizeof_localDict(cctx->localDict)
+ + ZSTD_sizeof_mtctx(cctx);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+ return ZSTD_sizeof_CCtx(zcs); /* same object */
+}
+
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+/* Returns true if the strategy supports using a row based matchfinder */
+static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
+ return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
+}
+
+/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
+ * for this compression.
+ */
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
+ assert(mode != ZSTD_ps_auto);
+ return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
+}
+
+/* Returns row matchfinder usage given an initial mode and cParams */
+static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
+ int const kHasSIMD128 = 1;
+#else
+ int const kHasSIMD128 = 0;
+#endif
+ if (mode != ZSTD_ps_auto) return mode; /* if explicitly requested, honor it : the row matchfinder is used even without SIMD */
+ mode = ZSTD_ps_disable;
+ if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
+ if (kHasSIMD128) {
+ if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
+ } else {
+ if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
+ }
+ return mode;
+}
+
+/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
+static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
+static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const U32 forDDSDict) {
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ /* We should always allocate a chaintable when allocating a matchstate for a DDS dictionary.
+ * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
+ */
+ return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
+}
+
+/* Resolves the long distance matching switch :
+ * returns ZSTD_ps_enable when the compression parameters call for it
+ * (wlog >= 27, strategy >= btopt), ZSTD_ps_disable otherwise,
+ * unless a mode was explicitly requested.
+ */
+static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
+ /* should not matter, as all cParams are presumed properly defined */
+ ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
+ cctxParams.cParams = cParams;
+
+ /* Adjust advanced params according to cParams */
+ cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
+ if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
+ assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
+ assert(cctxParams.ldmParams.hashRateLog < 32);
+ }
+ cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+ assert(!ZSTD_checkCParams(cParams));
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
+ ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+ params->customMem = customMem;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
+ ZSTD_customFree(params, params->customMem);
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+ return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->fParams.contentSizeFlag = 1;
+ return 0;
+}
+
+#define ZSTD_NO_CLEVEL 0
+
+/*
+ * Initializes the cctxParams from params and compressionLevel.
+ * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
+ */
+static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
+ cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
+ cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
+ DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
+ cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
+}
+
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
+ return 0;
+}
+
+/*
+ * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
+ * @param params Validated zstd parameters.
+ */
+static void ZSTD_CCtxParams_setZstdParams(
+ ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
+}
+
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ bounds.lowerBound = ZSTD_minCLevel();
+ bounds.upperBound = ZSTD_maxCLevel();
+ return bounds;
+
+ case ZSTD_c_windowLog:
+ bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_hashLog:
+ bounds.lowerBound = ZSTD_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_chainLog:
+ bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+ bounds.upperBound = ZSTD_CHAINLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_searchLog:
+ bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+ bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_minMatch:
+ bounds.lowerBound = ZSTD_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_targetLength:
+ bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+ bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+ return bounds;
+
+ case ZSTD_c_strategy:
+ bounds.lowerBound = ZSTD_STRATEGY_MIN;
+ bounds.upperBound = ZSTD_STRATEGY_MAX;
+ return bounds;
+
+ case ZSTD_c_contentSizeFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_checksumFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_dictIDFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_nbWorkers:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_jobSize:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_overlapLog:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_enableDedicatedDictSearch:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_enableLongDistanceMatching:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_ldmHashLog:
+ bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmMinMatch:
+ bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmBucketSizeLog:
+ bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmHashRateLog:
+ bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+ return bounds;
+
+ /* experimental parameters */
+ case ZSTD_c_rsyncable:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_forceMaxWindow :
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_format:
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ bounds.lowerBound = ZSTD_f_zstd1;
+ bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_forceAttachDict:
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
+ bounds.lowerBound = ZSTD_dictDefaultAttach;
+ bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_targetCBlockSize:
+ bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
+ bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
+ return bounds;
+
+ case ZSTD_c_srcSizeHint:
+ bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
+ bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
+ return bounds;
+
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+
+ case ZSTD_c_blockDelimiters:
+ bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
+ bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
+ return bounds;
+
+ case ZSTD_c_validateSequences:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_useBlockSplitter:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_useRowMatchFinder:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_deterministicRefPrefix:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ default:
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+ }
+}
+
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound, "Param out of bounds"); \
+}
+
+
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ return 1;
+
+ case ZSTD_c_format:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow :
+ case ZSTD_c_nbWorkers:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ default:
+ return 0;
+ }
+}
+
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
+ if (cctx->streamStage != zcss_init) {
+ if (ZSTD_isUpdateAuthorized(param)) {
+ cctx->cParamsChanged = 1;
+ } else {
+ RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
+ } }
+
+ switch(param)
+ {
+ case ZSTD_c_nbWorkers:
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+ "MT not compatible with static alloc");
+ break;
+
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_format:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ break;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
+ switch(param)
+ {
+ case ZSTD_c_format :
+ BOUNDCHECK(ZSTD_c_format, value);
+ CCtxParams->format = (ZSTD_format_e)value;
+ return (size_t)CCtxParams->format;
+
+ case ZSTD_c_compressionLevel : {
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
+ if (value == 0)
+ CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else
+ CCtxParams->compressionLevel = value;
+ if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
+ return 0; /* return type (size_t) cannot represent negative values */
+ }
+
+ case ZSTD_c_windowLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_windowLog, value);
+ CCtxParams->cParams.windowLog = (U32)value;
+ return CCtxParams->cParams.windowLog;
+
+ case ZSTD_c_hashLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_hashLog, value);
+ CCtxParams->cParams.hashLog = (U32)value;
+ return CCtxParams->cParams.hashLog;
+
+ case ZSTD_c_chainLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_chainLog, value);
+ CCtxParams->cParams.chainLog = (U32)value;
+ return CCtxParams->cParams.chainLog;
+
+ case ZSTD_c_searchLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_searchLog, value);
+ CCtxParams->cParams.searchLog = (U32)value;
+ return (size_t)value;
+
+ case ZSTD_c_minMatch :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_minMatch, value);
+ CCtxParams->cParams.minMatch = value;
+ return CCtxParams->cParams.minMatch;
+
+ case ZSTD_c_targetLength :
+ BOUNDCHECK(ZSTD_c_targetLength, value);
+ CCtxParams->cParams.targetLength = value;
+ return CCtxParams->cParams.targetLength;
+
+ case ZSTD_c_strategy :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_strategy, value);
+ CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+ return (size_t)CCtxParams->cParams.strategy;
+
+ case ZSTD_c_contentSizeFlag :
+ /* Content size written in frame header _when known_ (default:1) */
+ DEBUGLOG(4, "set content size flag = %u", (value!=0));
+ CCtxParams->fParams.contentSizeFlag = value != 0;
+ return CCtxParams->fParams.contentSizeFlag;
+
+ case ZSTD_c_checksumFlag :
+ /* A 32-bit content checksum will be calculated and written at end of frame (default:0) */
+ CCtxParams->fParams.checksumFlag = value != 0;
+ return CCtxParams->fParams.checksumFlag;
+
+ case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
+ CCtxParams->fParams.noDictIDFlag = !value;
+ return !CCtxParams->fParams.noDictIDFlag;
+
+ case ZSTD_c_forceMaxWindow :
+ CCtxParams->forceWindow = (value != 0);
+ return CCtxParams->forceWindow;
+
+ case ZSTD_c_forceAttachDict : {
+ const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+ BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ CCtxParams->attachDictPref = pref;
+ return CCtxParams->attachDictPref;
+ }
+
+ case ZSTD_c_literalCompressionMode : {
+ const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ CCtxParams->literalCompressionMode = lcm;
+ return CCtxParams->literalCompressionMode;
+ }
+
+ case ZSTD_c_nbWorkers :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_jobSize :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_enableDedicatedDictSearch :
+ CCtxParams->enableDedicatedDictSearch = (value!=0);
+ return CCtxParams->enableDedicatedDictSearch;
+
+ case ZSTD_c_enableLongDistanceMatching :
+ CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->ldmParams.enableLdm;
+
+ case ZSTD_c_ldmHashLog :
+ if (value!=0) /* 0 ==> auto */
+ BOUNDCHECK(ZSTD_c_ldmHashLog, value);
+ CCtxParams->ldmParams.hashLog = value;
+ return CCtxParams->ldmParams.hashLog;
+
+ case ZSTD_c_ldmMinMatch :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
+ CCtxParams->ldmParams.minMatchLength = value;
+ return CCtxParams->ldmParams.minMatchLength;
+
+ case ZSTD_c_ldmBucketSizeLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
+ CCtxParams->ldmParams.bucketSizeLog = value;
+ return CCtxParams->ldmParams.bucketSizeLog;
+
+ case ZSTD_c_ldmHashRateLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
+ CCtxParams->ldmParams.hashRateLog = value;
+ return CCtxParams->ldmParams.hashRateLog;
+
+ case ZSTD_c_targetCBlockSize :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+ CCtxParams->targetCBlockSize = value;
+ return CCtxParams->targetCBlockSize;
+
+ case ZSTD_c_srcSizeHint :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_srcSizeHint, value);
+ CCtxParams->srcSizeHint = value;
+ return CCtxParams->srcSizeHint;
+
+ case ZSTD_c_stableInBuffer:
+ BOUNDCHECK(ZSTD_c_stableInBuffer, value);
+ CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->inBufferMode;
+
+ case ZSTD_c_stableOutBuffer:
+ BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
+ CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->outBufferMode;
+
+ case ZSTD_c_blockDelimiters:
+ BOUNDCHECK(ZSTD_c_blockDelimiters, value);
+ CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ return CCtxParams->blockDelimiters;
+
+ case ZSTD_c_validateSequences:
+ BOUNDCHECK(ZSTD_c_validateSequences, value);
+ CCtxParams->validateSequences = value;
+ return CCtxParams->validateSequences;
+
+ case ZSTD_c_useBlockSplitter:
+ BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
+ CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->useBlockSplitter;
+
+ case ZSTD_c_useRowMatchFinder:
+ BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
+ CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->useRowMatchFinder;
+
+ case ZSTD_c_deterministicRefPrefix:
+ BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
+ CCtxParams->deterministicRefPrefix = !!value;
+ return CCtxParams->deterministicRefPrefix;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+}
+
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
+{
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_getParameter(
+ ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
+{
+ switch(param)
+ {
+ case ZSTD_c_format :
+ *value = CCtxParams->format;
+ break;
+ case ZSTD_c_compressionLevel :
+ *value = CCtxParams->compressionLevel;
+ break;
+ case ZSTD_c_windowLog :
+ *value = (int)CCtxParams->cParams.windowLog;
+ break;
+ case ZSTD_c_hashLog :
+ *value = (int)CCtxParams->cParams.hashLog;
+ break;
+ case ZSTD_c_chainLog :
+ *value = (int)CCtxParams->cParams.chainLog;
+ break;
+ case ZSTD_c_searchLog :
+ *value = CCtxParams->cParams.searchLog;
+ break;
+ case ZSTD_c_minMatch :
+ *value = CCtxParams->cParams.minMatch;
+ break;
+ case ZSTD_c_targetLength :
+ *value = CCtxParams->cParams.targetLength;
+ break;
+ case ZSTD_c_strategy :
+ *value = (unsigned)CCtxParams->cParams.strategy;
+ break;
+ case ZSTD_c_contentSizeFlag :
+ *value = CCtxParams->fParams.contentSizeFlag;
+ break;
+ case ZSTD_c_checksumFlag :
+ *value = CCtxParams->fParams.checksumFlag;
+ break;
+ case ZSTD_c_dictIDFlag :
+ *value = !CCtxParams->fParams.noDictIDFlag;
+ break;
+ case ZSTD_c_forceMaxWindow :
+ *value = CCtxParams->forceWindow;
+ break;
+ case ZSTD_c_forceAttachDict :
+ *value = CCtxParams->attachDictPref;
+ break;
+ case ZSTD_c_literalCompressionMode :
+ *value = CCtxParams->literalCompressionMode;
+ break;
+ case ZSTD_c_nbWorkers :
+ assert(CCtxParams->nbWorkers == 0);
+ *value = CCtxParams->nbWorkers;
+ break;
+ case ZSTD_c_jobSize :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_enableDedicatedDictSearch :
+ *value = CCtxParams->enableDedicatedDictSearch;
+ break;
+ case ZSTD_c_enableLongDistanceMatching :
+ *value = CCtxParams->ldmParams.enableLdm;
+ break;
+ case ZSTD_c_ldmHashLog :
+ *value = CCtxParams->ldmParams.hashLog;
+ break;
+ case ZSTD_c_ldmMinMatch :
+ *value = CCtxParams->ldmParams.minMatchLength;
+ break;
+ case ZSTD_c_ldmBucketSizeLog :
+ *value = CCtxParams->ldmParams.bucketSizeLog;
+ break;
+ case ZSTD_c_ldmHashRateLog :
+ *value = CCtxParams->ldmParams.hashRateLog;
+ break;
+ case ZSTD_c_targetCBlockSize :
+ *value = (int)CCtxParams->targetCBlockSize;
+ break;
+ case ZSTD_c_srcSizeHint :
+ *value = (int)CCtxParams->srcSizeHint;
+ break;
+ case ZSTD_c_stableInBuffer :
+ *value = (int)CCtxParams->inBufferMode;
+ break;
+ case ZSTD_c_stableOutBuffer :
+ *value = (int)CCtxParams->outBufferMode;
+ break;
+ case ZSTD_c_blockDelimiters :
+ *value = (int)CCtxParams->blockDelimiters;
+ break;
+ case ZSTD_c_validateSequences :
+ *value = (int)CCtxParams->validateSequences;
+ break;
+ case ZSTD_c_useBlockSplitter :
+ *value = (int)CCtxParams->useBlockSplitter;
+ break;
+ case ZSTD_c_useRowMatchFinder :
+ *value = (int)CCtxParams->useRowMatchFinder;
+ break;
+ case ZSTD_c_deterministicRefPrefix:
+ *value = (int)CCtxParams->deterministicRefPrefix;
+ break;
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return 0;
+}
+
+/* ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * just stores `params` into `cctx` ;
+ * no immediate action is performed, parameters are merely recorded for later use.
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ * This is possible even if a compression is ongoing.
+ * In which case, new parameters will be applied on the fly, starting with next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "The context is in the wrong stage!");
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong,
+ "Can't override parameters with cdict attached (some must "
+ "be inherited from the cdict).");
+
+ cctx->requestedParams = *params;
+ return 0;
+}
+
+size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't set pledgedSrcSize when not in init stage.");
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ return 0;
+}
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
+ int const compressionLevel,
+ size_t const dictSize);
+static int ZSTD_dedicatedDictSearch_isSupported(
+ const ZSTD_compressionParameters* cParams);
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams);
+
+/*
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+ ZSTD_localDict* const dl = &cctx->localDict;
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced2(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ &cctx->requestedParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
+ cctx->cdict = dl->cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't load a dictionary when ctx is not in init stage.");
+ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
+ } else {
+ void* dictBuffer;
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
+ dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
+ ZSTD_memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
+ }
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_byReference(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a dict when ctx not in init stage.");
+ /* Free the existing local cdict (if any) to save memory. */
+ ZSTD_clearAllDicts(cctx);
+ cctx->cdict = cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a pool when ctx not in init stage.");
+ cctx->pool = pool;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a prefix when ctx not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ if (prefix != NULL && prefixSize > 0) {
+ cctx->prefixDict.dict = prefix;
+ cctx->prefixDict.dictSize = prefixSize;
+ cctx->prefixDict.dictContentType = dictContentType;
+ }
+ return 0;
+}
+
+/*! ZSTD_CCtx_reset() :
+ * Also discards any loaded dictionary */
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ cctx->streamStage = zcss_init;
+ cctx->pledgedSrcSizePlusOne = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't reset parameters only when not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+ }
+ return 0;
+}
+
+
+/* ZSTD_checkCParams() :
+ checks that CParam values remain within the authorized range.
+ @return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+ BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
+ BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
+ BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
+ BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
+ BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
+ BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
+ BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ return 0;
+}
+
+/* ZSTD_clampCParams() :
+ * clamps CParam values into the valid range.
+ * @return : valid CParams */
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+# define CLAMP_TYPE(cParam, val, type) { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ }
+# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+ CLAMP(ZSTD_c_hashLog, cParams.hashLog);
+ CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+ CLAMP(ZSTD_c_minMatch, cParams.minMatch);
+ CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+ CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
+ return cParams;
+}
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+ return hashLog - btScale;
+}
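+
+/* e.g. ZSTD_cycleLog(24, strat) is 24 for hash-chain strategies and 23 for
+ * binary-tree strategies (ZSTD_btlazy2 and above), reflecting that the
+ * binary tree stores two entries per position.
+ */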
+
+/* ZSTD_dictAndWindowLog() :
+ * Returns an adjusted window log that is large enough to fit the source and the dictionary.
+ * The zstd format says that the entire dictionary is valid if one byte of the dictionary
+ * is within the window. So the hashLog and chainLog should be large enough to reference both
+ * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
+ * the hashLog and chainLog.
+ * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
+{
+ const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
+ /* No dictionary ==> No change */
+ if (dictSize == 0) {
+ return windowLog;
+ }
+ assert(windowLog <= ZSTD_WINDOWLOG_MAX);
+ assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
+ {
+ U64 const windowSize = 1ULL << windowLog;
+ U64 const dictAndWindowSize = dictSize + windowSize;
+ /* If the window size is already large enough to fit both the source and the dictionary
+ * then just use the window size. Otherwise adjust so that it fits the dictionary and
+ * the window.
+ */
+ if (windowSize >= dictSize + srcSize) {
+ return windowLog; /* Window size large enough already */
+ } else if (dictAndWindowSize >= maxWindowSize) {
+ return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
+ } else {
+ return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
+ }
+ }
+}
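+
+/* Worked example (illustrative numbers) : with windowLog=20 (1 MiB window),
+ * srcSize=4 MiB and dictSize=112 KiB, the window cannot hold dict+src, and
+ * dictSize+windowSize stays below the maximum, so the function returns
+ * ZSTD_highbit32(1163263) + 1 = 21 : a 2 MiB window, large enough to reference
+ * both the dictionary and the original window. */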
+
+/* ZSTD_adjustCParams_internal() :
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ * mostly downsize to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * note : `srcSize==0` really means zero (not "unknown") !
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize,
+ ZSTD_cParamMode_e mode)
+{
+ const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+ assert(ZSTD_checkCParams(cPar)==0);
+
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ /* If we don't know the source size, don't make any
+ * assumptions about it. We will already have selected
+ * smaller parameters if a dictionary is in use.
+ */
+ break;
+ case ZSTD_cpm_createCDict:
+ /* Assume a small source size when creating a dictionary
+ * with an unknown source size.
+ */
+ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ srcSize = minSrcSize;
+ break;
+ case ZSTD_cpm_attachDict:
+ /* Dictionary has its own dedicated parameters which have
+ * already been selected. We are selecting parameters
+ * for only the source.
+ */
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
+ if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
+ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
+ if (cycleLog > dictAndWindowLog)
+ cPar.chainLog -= (cycleLog - dictAndWindowLog);
+ }
+
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+
+ return cPar;
+}
+
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
+{
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
+ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
+}
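+
+/* Worked example (illustrative numbers) : adjusting for a 10 KiB input with no
+ * dictionary and cPar.windowLog=23 gives tSize=10240, so srcLog =
+ * ZSTD_highbit32(10239) + 1 = 14 and windowLog is lowered to 14 (a 16 KiB
+ * window), instead of the 8 MiB window the caller requested. */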
+
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+static void ZSTD_overrideCParams(
+ ZSTD_compressionParameters* cParams,
+ const ZSTD_compressionParameters* overrides)
+{
+ if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
+ if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
+ if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
+ if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
+ if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
+ if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
+ if (overrides->strategy) cParams->strategy = overrides->strategy;
+}
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ ZSTD_compressionParameters cParams;
+ if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
+ srcSizeHint = CCtxParams->srcSizeHint;
+ }
+ cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
+ if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
+ assert(!ZSTD_checkCParams(cParams));
+ /* srcSizeHint == 0 means 0 */
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
+}
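+
+/* Worked example : if the caller previously did
+ * ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 25), then
+ * CCtxParams->cParams.windowLog is 25 and overrides the windowLog derived from
+ * the compression level, before the final ZSTD_adjustCParams_internal() pass. */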
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const U32 enableDedicatedDictSearch,
+ const U32 forCCtx)
+{
+ /* chain table size should be 0 for fast or row-hash strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+ /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
+ * surrounded by redzones in ASAN. */
+ size_t const tableSpace = chainSize * sizeof(U32)
+ + hSize * sizeof(U32)
+ + h3Size * sizeof(U32);
+ size_t const optPotentialSpace =
+ ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
+ ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
+ : 0;
+ size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
+ ? optPotentialSpace
+ : 0;
+ size_t const slackSpace = ZSTD_cwksp_slack_space_required();
+
+ /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
+ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+
+ DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+ (U32)chainSize, (U32)hSize, (U32)h3Size);
+ return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
+}
+
+static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ const ZSTD_compressionParameters* cParams,
+ const ldmParams_t* ldmParams,
+ const int isStatic,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const size_t buffInSize,
+ const size_t buffOutSize,
+ const U64 pledgedSrcSize)
+{
+ size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (cParams->minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+ size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+ size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
+
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
+ size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
+ ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+
+
+ size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ + ZSTD_cwksp_alloc_size(buffOutSize);
+
+ size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+
+ size_t const neededSpace =
+ cctxSpace +
+ entropySpace +
+ blockStateSpace +
+ ldmSpace +
+ ldmSeqSpace +
+ matchStateSize +
+ tokenSpace +
+ bufferSpace;
+
+ DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
+ return neededSpace;
+}
+
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ &cParams);
+
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ /* estimateCCtxSize is for one-shot compression. So no buffers should
+ * be needed. However, we still allocate two 0-sized buffers, which can
+ * take space under ASAN. */
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ }
+}
+
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+{
+ int tier = 0;
+ size_t largestSize = 0;
+ static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
+ for (; tier < 4; ++tier) {
+ /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
+ largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
+ }
+ return largestSize;
+}
+
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ /* Ensure monotonically increasing memory usage as compression level increases */
+ size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
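+
+/* Minimal sketch of how this estimate pairs with a static context
+ * (illustrative only ; `wksp` is a caller-provided, suitably aligned buffer) :
+ *
+ *   size_t const wkspSize = ZSTD_estimateCCtxSize(3);
+ *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
+ *   (cctx == NULL if wksp is too small or badly aligned)
+ *
+ * Because the estimate takes the max across source-size tiers and across the
+ * lower levels as well, the budget is sufficient for any single-threaded,
+ * one-shot compression at that level. */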
+
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
+ ? ((size_t)1 << cParams.windowLog) + blockSize
+ : 0;
+ size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
+
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
+ ZSTD_CONTENTSIZE_UNKNOWN);
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ }
+}
+
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * able to report progression from inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+ { ZSTD_frameProgression fp;
+ size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+ cctx->inBuffPos - cctx->inToCompress;
+ if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+ assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+ fp.ingested = cctx->consumedSrcSize + buffered;
+ fp.consumed = cctx->consumedSrcSize;
+ fp.produced = cctx->producedCSize;
+ fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
+ fp.currentJobID = 0;
+ fp.nbActiveWorkers = 0;
+ return fp;
+} }
+
+/*! ZSTD_toFlushNow()
+ * Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0; /* over-simplification; could also check whether the context is currently running in streaming mode, in which case it would report how many bytes are left to be flushed within the output buffer */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ (void)cParams1;
+ (void)cParams2;
+ assert(cParams1.windowLog == cParams2.windowLog);
+ assert(cParams1.chainLog == cParams2.chainLog);
+ assert(cParams1.hashLog == cParams2.hashLog);
+ assert(cParams1.searchLog == cParams2.searchLog);
+ assert(cParams1.minMatch == cParams2.minMatch);
+ assert(cParams1.targetLength == cParams2.targetLength);
+ assert(cParams1.strategy == cParams2.strategy);
+}
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
+{
+ int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ bs->rep[i] = repStartValue[i];
+ bs->entropy.huf.repeatMode = HUF_repeat_none;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
+}
+
+/*! ZSTD_invalidateMatchState()
+ * Invalidate all the matches in the match finder tables.
+ * Requires nextSrc and base to be set (can be NULL).
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+ ZSTD_window_clear(&ms->window);
+
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->loadedDictEnd = 0;
+ ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->dictMatchState = NULL;
+}
+
+/*
+ * Controls, for this matchState reset, whether the tables need to be cleared /
+ * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
+ * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
+ * subsequent operation will overwrite the table space anyways (e.g., copying
+ * the matchState contents in from a CDict).
+ */
+typedef enum {
+ ZSTDcrp_makeClean,
+ ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;
+
+/*
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+ ZSTDirp_continue,
+ ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+ ZSTD_resetTarget_CDict,
+ ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+
+static size_t
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ const ZSTD_compressionParameters* cParams,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_compResetPolicy_e crp,
+ const ZSTD_indexResetPolicy_e forceResetIndex,
+ const ZSTD_resetTarget_e forWho)
+{
+ /* disable chain table allocation for fast or row-based strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
+ ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+
+ DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ if (forceResetIndex == ZSTDirp_reset) {
+ ZSTD_window_init(&ms->window);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ }
+
+ ms->hashLog3 = hashLog3;
+
+ ZSTD_invalidateMatchState(ms);
+
+ assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+ ZSTD_cwksp_clear_tables(ws);
+
+ DEBUGLOG(5, "reserving table space");
+ /* table Space */
+ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+ ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+ ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+
+ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+ if (crp!=ZSTDcrp_leaveDirty) {
+ /* reset tables only */
+ ZSTD_cwksp_clean_tables(ws);
+ }
+
+ /* opt parser space */
+ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
+ DEBUGLOG(4, "reserving optimal parser space");
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ }
+
+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+ { /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+ if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ }
+ { /* Rows hold 16, 32, or 64 entries, driven by searchLog (clamped to [4,6]) */
+ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+ assert(cParams->hashLog >= rowLog);
+ ms->rowHashLog = cParams->hashLog - rowLog;
+ }
+ }
+
+ ms->cParams = *cParams;
+
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+ return 0;
+}
+
+/* ZSTD_indexTooCloseToMax() :
+ * minor optimization : prefer memset() rather than reduceIndex()
+ * which is measurably slow in some circumstances (reported for Visual Studio).
+ * Works when re-using a context for a lot of smallish inputs :
+ * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
+ * memset() will be triggered before reduceIndex().
+ */
+#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
+static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
+{
+ return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
+}
+
+/* ZSTD_dictTooBig():
+ * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
+ * one go generically. So we ensure that in that case we reset the tables to zero,
+ * so that we can load as much of the dictionary as possible.
+ */
+static int ZSTD_dictTooBig(size_t const loadedDictSize)
+{
+ return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
+}
+
+/*! ZSTD_resetCCtx_internal() :
+ * @param loadedDictSize The size of the dictionary to be loaded
+ * into the context, if any. If no dictionary is used, or the
+ * dictionary is being attached / copied, then pass 0.
+ * note : `params` are assumed fully validated at this stage.
+ */
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+ ZSTD_CCtx_params const* params,
+ U64 const pledgedSrcSize,
+ size_t const loadedDictSize,
+ ZSTD_compResetPolicy_e const crp,
+ ZSTD_buffered_policy_e const zbuff)
+{
+ ZSTD_cwksp* const ws = &zc->workspace;
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+
+ zc->isFirstBlock = 1;
+
+ /* Set applied params early so we can modify them for LDM,
+ * and point params at the applied params.
+ */
+ zc->appliedParams = *params;
+ params = &zc->appliedParams;
+
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ assert(params->useBlockSplitter != ZSTD_ps_auto);
+ assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* Adjust long distance matching parameters */
+ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
+ assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
+ assert(params->ldmParams.hashRateLog < 32);
+ }
+
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
+ ? windowSize + blockSize
+ : 0;
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
+
+ int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
+ int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
+ ZSTD_indexResetPolicy_e needsIndexReset =
+ (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
+
+ size_t const neededSpace =
+ ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
+ buffInSize, buffOutSize, pledgedSrcSize);
+ int resizeWorkspace;
+
+ FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
+
+ if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
+
+ { /* Check if workspace is large enough, alloc a new one if needed */
+ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+ int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
+ resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ DEBUGLOG(4, "Need %zu B workspace", neededSpace);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+ if (resizeWorkspace) {
+ DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
+ ZSTD_cwksp_sizeof(ws) >> 10,
+ neededSpace >> 10);
+
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
+
+ needsIndexReset = ZSTDirp_reset;
+
+ ZSTD_cwksp_free(ws, zc->customMem);
+ FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
+
+ DEBUGLOG(5, "reserving object space");
+ /* Statically sized space.
+ * entropyWorkspace never moves,
+ * though prev/next block swap places */
+ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
+ zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
+ zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
+ zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ } }
+
+ ZSTD_cwksp_clear(ws);
+
+ /* init params */
+ zc->blockState.matchState.cParams = params->cParams;
+ zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ zc->consumedSrcSize = 0;
+ zc->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ zc->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ zc->blockSize = blockSize;
+
+ xxh64_reset(&zc->xxhState, 0);
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
+ zc->dictContentSize = 0;
+
+ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+
+ /* ZSTD_wildcopy() is used to copy into the literals buffer,
+ * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+ */
+ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
+ zc->seqStore.maxNbLit = blockSize;
+
+ /* buffers */
+ zc->bufferedPolicy = zbuff;
+ zc->inBuffSize = buffInSize;
+ zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
+ zc->outBuffSize = buffOutSize;
+ zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
+
+ /* ldm bucketOffsets table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const numBuckets =
+ ((size_t)1) << (params->ldmParams.hashLog -
+ params->ldmParams.bucketSizeLog);
+ zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
+ ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
+ }
+
+ /* sequences storage */
+ ZSTD_referenceExternalSequences(zc, NULL, 0);
+ zc->seqStore.maxNbSeq = maxNbSeq;
+ zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
+ &params->cParams,
+ params->useRowMatchFinder,
+ crp,
+ needsIndexReset,
+ ZSTD_resetTarget_CCtx), "");
+
+ /* ldm hash table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ ZSTD_window_init(&zc->ldmState.window);
+ zc->ldmState.loadedDictEnd = 0;
+ }
+
+ DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
+ assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
+
+ zc->initialized = 1;
+
+ return 0;
+ }
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+ int i;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
+
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+ 8 KB, /* unused */
+ 8 KB, /* ZSTD_fast */
+ 16 KB, /* ZSTD_dfast */
+ 32 KB, /* ZSTD_greedy */
+ 32 KB, /* ZSTD_lazy */
+ 32 KB, /* ZSTD_lazy2 */
+ 32 KB, /* ZSTD_btlazy2 */
+ 32 KB, /* ZSTD_btopt */
+ 8 KB, /* ZSTD_btultra */
+ 8 KB /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize)
+{
+ size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+ int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
+ return dedicatedDictSearch
+ || ( ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params->attachDictPref == ZSTD_dictForceAttach )
+ && params->attachDictPref != ZSTD_dictForceCopy
+ && !params->forceWindow ); /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
+}
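+
+/* Worked example (illustrative numbers, assuming no dedicated dict search and
+ * default attach preferences) : with a CDict built for ZSTD_lazy (32 KB cutoff
+ * above), a pledgedSrcSize of 10 KB or an unknown size attaches the dictionary
+ * in place, whereas a pledgedSrcSize of 10 MB copies its tables into the
+ * working context, where the copy cost is amortized over the larger input. */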
+
+static size_t
+ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
+ {
+ ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
+ unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Resize working context table params for input only, since the dict
+ * has its own tables. */
+ /* pledgedSrcSize == 0 means 0! */
+
+ if (cdict->matchState.dedicatedDictSearch) {
+ ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
+ }
+
+ params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
+ cdict->dictContentSize, ZSTD_cpm_attachDict);
+ params.cParams.windowLog = windowLog;
+ params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_makeClean, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
+ }
+
+ { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+ - cdict->matchState.window.base);
+ const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+ if (cdictLen == 0) {
+ /* don't even attach dictionaries with no contents */
+ DEBUGLOG(4, "skipping attaching empty dictionary");
+ } else {
+ DEBUGLOG(4, "attaching dictionary into context");
+ cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+ /* prep working match state so dict matches never have negative indices
+ * when they are translated to the working context's index space. */
+ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+ cctx->blockState.matchState.window.nextSrc =
+ cctx->blockState.matchState.window.base + cdictEnd;
+ ZSTD_window_clear(&cctx->blockState.matchState.window);
+ }
+ /* loadedDictEnd is expressed within the referential of the active context */
+ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+ } }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+ assert(!cdict->matchState.dedicatedDictSearch);
+ DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
+
+ { unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Copy only compression parameters related to tables. */
+ params.cParams = *cdict_cParams;
+ params.cParams.windowLog = windowLog;
+ params.useRowMatchFinder = cdict->useRowMatchFinder;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_leaveDirty, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+ assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+ assert(params.useRowMatchFinder != ZSTD_ps_auto);
+
+ /* copy tables */
+ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
+ ? ((size_t)1 << cdict_cParams->chainLog)
+ : 0;
+ size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
+
+ ZSTD_memcpy(cctx->blockState.matchState.hashTable,
+ cdict->matchState.hashTable,
+ hSize * sizeof(U32));
+ /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
+ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
+ ZSTD_memcpy(cctx->blockState.matchState.chainTable,
+ cdict->matchState.chainTable,
+ chainSize * sizeof(U32));
+ }
+ /* copy tag table */
+ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ZSTD_memcpy(cctx->blockState.matchState.tagTable,
+ cdict->matchState.tagTable,
+ tagTableSize);
+ }
+ }
+
+ /* Zero the hashTable3, since the cdict never fills it */
+ { int const h3log = cctx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+ assert(cdict->matchState.hashLog3 == 0);
+ ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
+
+ /* copy dictionary offsets */
+ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
+
+ if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+ return ZSTD_resetCCtx_byAttachingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ } else {
+ return ZSTD_resetCCtx_byCopyingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ }
+}
+
+/*! ZSTD_copyCCtx_internal() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * The "context", in this case, refers to the hash and chain tables,
+ * entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+ const ZSTD_CCtx* srcCCtx,
+ ZSTD_frameParameters fParams,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
+ "Can't copy a ctx that's not in init stage.");
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+ ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
+ assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
+ params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
+ params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
+ params.ldmParams = srcCCtx->appliedParams.ldmParams;
+ params.fParams = fParams;
+ ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_leaveDirty, zbuff);
+ assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+ assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+ assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+ assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+ assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
+
+ /* copy tables */
+ { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
+ srcCCtx->appliedParams.useRowMatchFinder,
+ 0 /* forDDSDict */)
+ ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
+ : 0;
+ size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+ int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
+ srcCCtx->blockState.matchState.hashTable,
+ hSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
+ srcCCtx->blockState.matchState.chainTable,
+ chainSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
+ srcCCtx->blockState.matchState.hashTable3,
+ h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
+
+ /* copy dictionary offsets */
+ {
+ const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+ dstCCtx->dictID = srcCCtx->dictID;
+ dstCCtx->dictContentSize = srcCCtx->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+
+ return 0;
+}
+
+/*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * pledgedSrcSize==0 means "unknown".
+ * @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+ ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
+ ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+ return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+ fParams, pledgedSrcSize,
+ zbuff);
+}
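+
+/* Minimal usage sketch (illustrative only ; `srcCCtx` must still be in its
+ * init stage, e.g. right after loading a dictionary) :
+ *
+ *   size_t const r = ZSTD_copyCCtx(dstCCtx, srcCCtx, 0);   (0 == size unknown)
+ *   if (ZSTD_isError(r)) return r;
+ */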
+
+
+#define ZSTD_ROWSIZE 16
+/*! ZSTD_reduceTable() :
+ * reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
+ * It must be set to a clear 0/1 value, to remove branch during inlining.
+ * Presume table size is a multiple of ZSTD_ROWSIZE
+ * to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+ int const nbRows = (int)size / ZSTD_ROWSIZE;
+ int cellNb = 0;
+ int rowNb;
+ /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
+ U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
+ assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
+ assert(size < (1U<<31)); /* can be cast to int */
+
+
+ for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+ int column;
+ for (column=0; column<ZSTD_ROWSIZE; column++) {
+ U32 newVal;
+ if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
+ /* This write is pointless, but is required(?) for the compiler
+ * to auto-vectorize the loop. */
+ newVal = ZSTD_DUBT_UNSORTED_MARK;
+ } else if (table[cellNb] < reducerThreshold) {
+ newVal = 0;
+ } else {
+ newVal = table[cellNb] - reducerValue;
+ }
+ table[cellNb] = newVal;
+ cellNb++;
+ } }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
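+
+/* Worked example (illustrative numbers) : with reducerValue = 0x10000000, a
+ * table entry of 0x10000005 becomes 5, an entry of 0x42 (below the threshold)
+ * is squashed to 0, and ZSTD_DUBT_UNSORTED_MARK is preserved untouched when
+ * preserveMark is set (btlazy2 variant). */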
+
+/*! ZSTD_reduceIndex() :
+ * rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
+{
+ { U32 const hSize = (U32)1 << params->cParams.hashLog;
+ ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+ }
+
+ if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
+ U32 const chainSize = (U32)1 << params->cParams.chainLog;
+ if (params->cParams.strategy == ZSTD_btlazy2)
+ ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+ else
+ ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+ }
+
+ if (ms->hashLog3) {
+ U32 const h3Size = (U32)1 << ms->hashLog3;
+ ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+ }
+}
+
+
+/*-*******************************************************
+* Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
+ assert(nbSeq <= seqStorePtr->maxNbSeq);
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
+ U32 const mlv = sequences[u].mlBase;
+ llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
+ mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ }
+ if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
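+
+/* Worked example : a sequence whose offBase is 1024 gets offset code
+ * ZSTD_highbit32(1024) = 10, while literal-length and match-length codes come
+ * from the ZSTD_LLcode() / ZSTD_MLcode() mappings rather than a plain log2. */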
+
+/* ZSTD_useTargetCBlockSize():
+ * Returns whether the target compressed block size parameter is in use.
+ * If so, compression makes a best effort to produce compressed blocks of roughly targetCBlockSize bytes.
+ * Returns 1 if true, 0 otherwise. */
+static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
+ return (cctxParams->targetCBlockSize != 0);
+}
+
+/* ZSTD_blockSplitterEnabled():
+ * Returns whether the block splitting parameter is in use.
+ * If so, compression makes a best effort to split blocks in order to improve the compression ratio.
+ * By the time this function is called, the parameter must be finalized.
+ * Returns 1 if true, 0 otherwise. */
+static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
+ assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
+ return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
+}
+
+/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
+ * and size of the sequences statistics
+ */
+typedef struct {
+ U32 LLtype;
+ U32 Offtype;
+ U32 MLtype;
+ size_t size;
+ size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_symbolEncodingTypeStats_t;
+
+/* ZSTD_buildSequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
+ * Modifies `nextEntropy` to have the appropriate values as a side effect.
+ * nbSeq must be greater than 0.
+ *
+ * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
+ const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+ BYTE* dst, const BYTE* const dstEnd,
+ ZSTD_strategy strategy, unsigned* countWorkspace,
+ void* entropyWorkspace, size_t entropyWkspSize) {
+ BYTE* const ostart = dst;
+ const BYTE* const oend = dstEnd;
+ BYTE* op = ostart;
+ FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ stats.lastCountSize = 0;
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ assert(op <= oend);
+ assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+ stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
+ countWorkspace, max, llCodeTable, nbSeq,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->litlengthCTable,
+ sizeof(prevEntropy->litlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.LLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+ stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
+ countWorkspace, max, ofCodeTable, nbSeq,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->offcodeCTable,
+ sizeof(prevEntropy->offcodeCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.Offtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+ stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
+ countWorkspace, max, mlCodeTable, nbSeq,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->matchlengthCTable,
+ sizeof(prevEntropy->matchlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.MLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ stats.size = (size_t)(op-ostart);
+ return stats;
+}
+
+/* ZSTD_entropyCompressSeqStore_internal():
+ * compresses both literals and sequences
+ * Returns compressed size of block, or a zstd error.
+ */
+#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
+MEM_STATIC size_t
+ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ const int bmi2)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ unsigned* count = (unsigned*)entropyWorkspace;
+ FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t lastCountSize;
+
+ entropyWorkspace = count + (MaxSeq + 1);
+ entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
+
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
+
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
+ /* Base suspicion of uncompressibility on ratio of literals to sequences */
+ unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
+ size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ size_t const cSize = ZSTD_compressLiterals(
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ op, dstCapacity,
+ literals, litSize,
+ entropyWorkspace, entropyWkspSize,
+ bmi2, suspectUncompressible);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
+ assert(cSize <= dstCapacity);
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "Can't fit seq hdr in output buf!");
+ if (nbSeq < 128) {
+ *op++ = (BYTE)nbSeq;
+ } else if (nbSeq < LONGNBSEQ) {
+ op[0] = (BYTE)((nbSeq>>8) + 0x80);
+ op[1] = (BYTE)nbSeq;
+ op+=2;
+ } else {
+ op[0]=0xFF;
+ MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
+ op+=3;
+ }
+ assert(op <= oend);
+ if (nbSeq==0) {
+ /* Copy the old tables over as if we repeated them */
+ ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ return (size_t)(op - ostart);
+ }
+ {
+ ZSTD_symbolEncodingTypeStats_t stats;
+ BYTE* seqHead = op++;
+ /* build stats for sequences */
+ stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ &prevEntropy->fse, &nextEntropy->fse,
+ op, oend,
+ strategy, count,
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
+ lastCountSize = stats.lastCountSize;
+ op += stats.size;
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ assert(op <= oend);
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+ if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
+ /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(lastCountSize + bitstreamSize == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
+ return (size_t)(op - ostart);
+}
+
+MEM_STATIC size_t
+ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+ dst, dstCapacity,
+ entropyWorkspace, entropyWkspSize, bmi2);
+ if (cSize == 0) return 0;
+ /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+ * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
+ */
+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ return 0; /* block not compressed */
+ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
+
+ /* Check compressibility */
+ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ if (cSize >= maxCSize) return 0; /* block not compressed */
+ }
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
+ return cSize;
+}
+
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
+{
+ static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ { ZSTD_compressBlock_fast /* default for 0 */,
+ ZSTD_compressBlock_fast,
+ ZSTD_compressBlock_doubleFast,
+ ZSTD_compressBlock_greedy,
+ ZSTD_compressBlock_lazy,
+ ZSTD_compressBlock_lazy2,
+ ZSTD_compressBlock_btlazy2,
+ ZSTD_compressBlock_btopt,
+ ZSTD_compressBlock_btultra,
+ ZSTD_compressBlock_btultra2 },
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ ZSTD_compressBlock_fast_extDict,
+ ZSTD_compressBlock_doubleFast_extDict,
+ ZSTD_compressBlock_greedy_extDict,
+ ZSTD_compressBlock_lazy_extDict,
+ ZSTD_compressBlock_lazy2_extDict,
+ ZSTD_compressBlock_btlazy2_extDict,
+ ZSTD_compressBlock_btopt_extDict,
+ ZSTD_compressBlock_btultra_extDict,
+ ZSTD_compressBlock_btultra_extDict },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+ ZSTD_compressBlock_doubleFast_dictMatchState,
+ ZSTD_compressBlock_greedy_dictMatchState,
+ ZSTD_compressBlock_lazy_dictMatchState,
+ ZSTD_compressBlock_lazy2_dictMatchState,
+ ZSTD_compressBlock_btlazy2_dictMatchState,
+ ZSTD_compressBlock_btopt_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState },
+ { NULL /* default for 0 */,
+ NULL,
+ NULL,
+ ZSTD_compressBlock_greedy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ NULL,
+ NULL,
+ NULL,
+ NULL }
+ };
+ ZSTD_blockCompressor selectedCompressor;
+ ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
+ static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
+ { ZSTD_compressBlock_greedy_row,
+ ZSTD_compressBlock_lazy_row,
+ ZSTD_compressBlock_lazy2_row },
+ { ZSTD_compressBlock_greedy_extDict_row,
+ ZSTD_compressBlock_lazy_extDict_row,
+ ZSTD_compressBlock_lazy2_extDict_row },
+ { ZSTD_compressBlock_greedy_dictMatchState_row,
+ ZSTD_compressBlock_lazy_dictMatchState_row,
+ ZSTD_compressBlock_lazy2_dictMatchState_row },
+ { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
+ };
+ DEBUGLOG(4, "Selecting a row-based matchfinder");
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
+ } else {
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ }
+ assert(selectedCompressor != NULL);
+ return selectedCompressor;
+}
+
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
+ ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+}
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->longLengthType = ZSTD_llt_none;
+}
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+
+static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+{
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ /* Assert that we have correctly flushed the ctx params into the ms's copy */
+ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
+ ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
+ } else {
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ }
+ return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
+ }
+ ZSTD_resetSeqStore(&(zc->seqStore));
+ /* required for optimal parser to read stats from dictionary */
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+ /* tell the optimal parser how we expect to compress literals */
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
+ /* a gap between an attached dict and the current window is not safe,
+ * they must remain adjacent,
+ * and when that stops being the case, the dict must be unset */
+ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
+
+ /* limited update after a very long match */
+ { const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 curr = (U32)(istart-base);
+ if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
+ if (curr > ms->nextToUpdate + 384)
+ ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
+ }
+
+ /* select and store sequences */
+ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
+ size_t lastLLSize;
+ { int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+ }
+ if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+ assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&zc->externSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
+ src, srcSize);
+ assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+ } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+
+ ldmSeqStore.seq = zc->ldmSequences;
+ ldmSeqStore.capacity = zc->maxNbLdmSequences;
+ /* Updates ldmSeqStore.size */
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+ &zc->appliedParams.ldmParams,
+ src, srcSize), "");
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&ldmSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
+ src, srcSize);
+ assert(ldmSeqStore.pos == ldmSeqStore.size);
+ } else { /* not long range mode */
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
+ ms->ldmSeqStore = NULL;
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ }
+ { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+ } }
+ return ZSTDbss_compress;
+}
+
+static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+{
+ const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+ const seqDef* seqStoreSeqs = seqStore->sequencesStart;
+ size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
+ size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
+ size_t literalsRead = 0;
+ size_t lastLLSize;
+
+ ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+ size_t i;
+ repcodes_t updatedRepcodes;
+
+ assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+ /* Ensure we have enough space for last literals "sequence" */
+ assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
+ ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (i = 0; i < seqStoreSeqSize; ++i) {
+ U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
+ outSeqs[i].litLength = seqStoreSeqs[i].litLength;
+ outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
+ outSeqs[i].rep = 0;
+
+ if (i == seqStore->longLengthPos) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ outSeqs[i].litLength += 0x10000;
+ } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ outSeqs[i].matchLength += 0x10000;
+ }
+ }
+
+ if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
+ /* Derive the correct offset corresponding to a repcode */
+ outSeqs[i].rep = seqStoreSeqs[i].offBase;
+ if (outSeqs[i].litLength != 0) {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ } else {
+ if (outSeqs[i].rep == 3) {
+ rawOffset = updatedRepcodes.rep[0] - 1;
+ } else {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
+ }
+ }
+ }
+ outSeqs[i].offset = rawOffset;
+ /* seqStoreSeqs[i].offBase == offCode+1, and ZSTD_updateRep() expects offCode,
+ so we provide seqStoreSeqs[i].offBase - 1 */
+ ZSTD_updateRep(updatedRepcodes.rep,
+ seqStoreSeqs[i].offBase - 1,
+ seqStoreSeqs[i].litLength == 0);
+ literalsRead += outSeqs[i].litLength;
+ }
+ /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
+ * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
+ * for the block boundary, according to the API.
+ */
+ assert(seqStoreLiteralsSize >= literalsRead);
+ lastLLSize = seqStoreLiteralsSize - literalsRead;
+ outSeqs[i].litLength = (U32)lastLLSize;
+ outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
+ seqStoreSeqSize++;
+ zc->seqCollector.seqIndex += seqStoreSeqSize;
+}
+
+size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize)
+{
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
+ void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ SeqCollector seqCollector;
+
+ RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
+
+ seqCollector.collectSequences = 1;
+ seqCollector.seqStart = outSeqs;
+ seqCollector.seqIndex = 0;
+ seqCollector.maxSequences = outSeqsSize;
+ zc->seqCollector = seqCollector;
+
+ ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
+ return zc->seqCollector.seqIndex;
+}
+
+size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
+ size_t in = 0;
+ size_t out = 0;
+ for (; in < seqsSize; ++in) {
+ if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
+ if (in != seqsSize - 1) {
+ sequences[in+1].litLength += sequences[in].litLength;
+ }
+ } else {
+ sequences[out] = sequences[in];
+ ++out;
+ }
+ }
+ return out;
+}
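+
+/* Illustrative walk-through of ZSTD_mergeBlockDelimiters() (example values only):
+ * given {ll=4, ml=10, of=8}, {ll=2, ml=0, of=0}, {ll=5, ml=7, of=3}, the middle
+ * entry is a block delimiter, so its litLength (2) folds into the following
+ * sequence, which becomes {ll=7, ml=7, of=3}; the function returns 2. */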
+
+/* Unrolled loop to read four size_ts of input at a time. Returns 1 if the data is RLE, 0 if not. */
+static int ZSTD_isRLE(const BYTE* src, size_t length) {
+ const BYTE* ip = src;
+ const BYTE value = ip[0];
+ const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
+ const size_t unrollSize = sizeof(size_t) * 4;
+ const size_t unrollMask = unrollSize - 1;
+ const size_t prefixLength = length & unrollMask;
+ size_t i;
+ size_t u;
+ if (length == 1) return 1;
+ /* Check if prefix is RLE first before using unrolled loop */
+ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
+ return 0;
+ }
+ for (i = prefixLength; i != length; i += unrollSize) {
+ for (u = 0; u < unrollSize; u += sizeof(size_t)) {
+ if (MEM_readST(ip + i + u) != valueST) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+/* Returns true if the given block may be RLE.
+ * This is just a heuristic based on the compressibility.
+ * It may return both false positives and false negatives.
+ */
+static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+{
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+ size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
+
+ return nbSeqs < 4 && nbLits < 10;
+}
+
+static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
+{
+ ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
+ bs->prevCBlock = bs->nextCBlock;
+ bs->nextCBlock = tmp;
+}
+
+/* Writes the block header */
+static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+}
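+
+/* Illustrative example of the 3-byte header written above (assuming the zstd
+ * format block-type values bt_rle == 1 and bt_compressed == 2): a non-last
+ * compressed block with cSize == 100 gives 0 + (2<<1) + (100<<3) == 0x324,
+ * stored little-endian as the bytes 24 03 00. Bit 0 is the last-block flag,
+ * bits 1-2 the block type, and bits 3..23 the size field. */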
+
+/* ZSTD_buildBlockEntropyStats_literals() :
+ * Builds entropy for the literals.
+ * Stores literals block type (raw, rle, compressed, repeat) and
+ * huffman description table to hufMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE workspace
+ * @return : size of huffman description table or error code */
+static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int literalsCompressionIsDisabled,
+ void* workspace, size_t wkspSize)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
+ BYTE* const nodeWksp = countWkspStart + countWkspSize;
+ const size_t nodeWkspSize = wkspEnd-nodeWksp;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ unsigned huffLog = HUF_TABLELOG_DEFAULT;
+ HUF_repeat repeat = prevHuf->repeatMode;
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (literalsCompressionIsDisabled) {
+ DEBUGLOG(5, "set_basic - disabled");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+
+ /* small ? don't even attempt compression (speed opt) */
+#ifndef COMPRESS_LITERALS_SIZE_MIN
+#define COMPRESS_LITERALS_SIZE_MIN 63
+#endif
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) {
+ DEBUGLOG(5, "set_basic - too small");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Scan input and build symbol stats */
+ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
+ FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
+ if (largest == srcSize) {
+ DEBUGLOG(5, "set_rle");
+ hufMetadata->hType = set_rle;
+ return 0;
+ }
+ if (largest <= (srcSize >> 7)+4) {
+ DEBUGLOG(5, "set_basic - no gain");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Validate the previous Huffman table */
+ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ repeat = HUF_repeat_none;
+ }
+
+ /* Build Huffman Tree */
+ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
+ maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
+ huffLog = (U32)maxBits;
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_repeat;
+ return 0;
+ }
+ }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
+ }
+ }
+}
+
+
+/* ZSTD_buildDummySequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
+ * and updates nextEntropy to the appropriate repeatMode.
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
+ ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
+ nextEntropy->litlength_repeatMode = FSE_repeat_none;
+ nextEntropy->offcode_repeatMode = FSE_repeat_none;
+ nextEntropy->matchlength_repeatMode = FSE_repeat_none;
+ return stats;
+}
+
+/* ZSTD_buildBlockEntropyStats_sequences() :
+ * Builds entropy for the sequences.
+ * Stores symbol compression modes and fse table to fseMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE wksp.
+ * @return : size of fse tables or error code */
+static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
+{
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* const ostart = fseMetadata->fseTablesBuffer;
+ BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
+ BYTE* op = ostart;
+ unsigned* countWorkspace = (unsigned*)workspace;
+ unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
+ size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
+ stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ prevEntropy, nextEntropy, op, oend,
+ strategy, countWorkspace,
+ entropyWorkspace, entropyWorkspaceSize)
+ : ZSTD_buildDummySequencesStatistics(nextEntropy);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
+ fseMetadata->lastCountSize = stats.lastCountSize;
+ return stats.size;
+}
+
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * Requires workspace size ENTROPY_WORKSPACE_SIZE
+ *
+ * @return : 0 on success or error code
+ */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
+ entropyMetadata->hufMetadata.hufDesSize =
+ ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
+ &prevEntropy->huf, &nextEntropy->huf,
+ &entropyMetadata->hufMetadata,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
+ entropyMetadata->fseMetadata.fseTablesSize =
+ ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
+ &prevEntropy->fse, &nextEntropy->fse,
+ cctxParams,
+ &entropyMetadata->fseMetadata,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
+ return 0;
+}
+
+/* Returns the size estimate for the literals section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
+ U32 singleStream = litSize < 256;
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
+/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
+static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
+ const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ (void)defaultMax;
+ cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
+ return nbSeq * 10;
+ }
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits >> 3;
+}
+
+/* Returns the size estimate for the sequences section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
+ size_t cSeqSizeEstimate = 0;
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
+ fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
+ fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
+ fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+/* Returns the size estimate for a given stream of literals, of, ll, ml */
+static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return seqSize + literalsSize + ZSTD_blockHeaderSize;
+}
+
+/* Builds entropy statistics and uses them for blocksize estimation.
+ *
+ * Returns the estimated compressed size of the seqStore, or a zstd error.
+ */
+static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) {
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
+ DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
+ seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
+ (size_t)(seqStore->sequences - seqStore->sequencesStart),
+ &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
+ (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
+}
+
+/* Returns literals bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
+ size_t literalsBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ literalsBytes += seq.litLength;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
+ literalsBytes += 0x10000;
+ }
+ }
+ return literalsBytes;
+}
+
+/* Returns match bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
+ size_t matchBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ matchBytes += seq.mlBase + MINMATCH;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
+ matchBytes += 0x10000;
+ }
+ }
+ return matchBytes;
+}
+
+/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
+ * Stores the result in resultSeqStore.
+ */
+static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
+ const seqStore_t* originalSeqStore,
+ size_t startIdx, size_t endIdx) {
+ BYTE* const litEnd = originalSeqStore->lit;
+ size_t literalsBytes;
+ size_t literalsBytesPreceding = 0;
+
+ *resultSeqStore = *originalSeqStore;
+ if (startIdx > 0) {
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
+ literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ }
+
+ /* Move longLengthPos into the correct position if necessary */
+ if (originalSeqStore->longLengthType != ZSTD_llt_none) {
+ if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
+ resultSeqStore->longLengthType = ZSTD_llt_none;
+ } else {
+ resultSeqStore->longLengthPos -= (U32)startIdx;
+ }
+ }
+ resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
+ literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->litStart += literalsBytesPreceding;
+ if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
+ /* This accounts for possible last literals if the derived chunk reaches the end of the block */
+ resultSeqStore->lit = litEnd;
+ } else {
+ resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
+ }
+ resultSeqStore->llCode += startIdx;
+ resultSeqStore->mlCode += startIdx;
+ resultSeqStore->ofCode += startIdx;
+}
+
+/*
+ * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
+ * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
+ */
+static U32
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
+{
+ U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */
+ assert(STORED_IS_REPCODE(offCode));
+ if (adjustedOffCode == ZSTD_REP_NUM) {
+ /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
+ assert(rep[0] > 0);
+ return rep[0] - 1;
+ }
+ return rep[adjustedOffCode];
+}
+
+/*
+ * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
+ * due to emission of RLE/raw blocks that disturb the offset history,
+ * and replaces any repcodes within the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side.
+ * cRepcodes are updated exactly in accordance with the seqStore.
+ *
+ * Note : this function assumes seq->offBase respects the following numbering scheme :
+ * 0 : invalid
+ * 1-3 : repcode 1-3
+ * 4+ : real_offset+3
+ */
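+/* For illustration, following the numbering above: a match that reuses
+ * repcode 2 is stored with offBase == 2, while a match at a raw offset of 100
+ * is stored with offBase == 103 (real_offset + 3). */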
+static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
+ seqStore_t* const seqStore, U32 const nbSeq) {
+ U32 idx = 0;
+ for (; idx < nbSeq; ++idx) {
+ seqDef* const seq = seqStore->sequencesStart + idx;
+ U32 const ll0 = (seq->litLength == 0);
+ U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
+ assert(seq->offBase > 0);
+ if (STORED_IS_REPCODE(offCode)) {
+ U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
+ U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
+ /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
+ * the repcode with the offset it actually references, determined by the compression
+ * repcode history.
+ */
+ if (dRawOffset != cRawOffset) {
+ seq->offBase = cRawOffset + ZSTD_REP_NUM;
+ }
+ }
+ /* Compression repcode history is always updated with values directly from the unmodified seqStore.
+ * Decompression repcode history may use a modified seq->offBase value taken from the compression repcode history.
+ */
+ ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
+ ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+ }
+}
+
+/* ZSTD_compressSeqStore_singleBlock():
+ * Compresses a seqStore into a block with a block header, into the buffer dst.
+ *
+ * Returns the total size of that block (including header) or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+ repcodes_t* const dRep, repcodes_t* const cRep,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock, U32 isPartition)
+{
+ const U32 rleMaxLength = 25;
+ BYTE* op = (BYTE*)dst;
+ const BYTE* ip = (const BYTE*)src;
+ size_t cSize;
+ size_t cSeqsSize;
+
+ /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
+ repcodes_t const dRepOriginal = *dRep;
+ DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
+ if (isPartition)
+ ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
+ cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+ FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
+
+ if (!zc->isFirstBlock &&
+ cSeqsSize < rleMaxLength &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+ /* We don't want to emit our first block as an RLE even if it qualifies, because
+ * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3.
+ */
+ cSeqsSize = 1;
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ if (cSeqsSize == 0) {
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else if (cSeqsSize == 1) {
+ cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
+ cSize = ZSTD_blockHeaderSize + cSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
+ }
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+/* Struct to keep track of where we are in our recursive calls. */
+typedef struct {
+ U32* splitLocations; /* Array of split indices */
+ size_t idx; /* The current index within splitLocations being worked on */
+} seqStoreSplits;
+
+#define MIN_SEQUENCES_BLOCK_SPLITTING 300
+
+/* Helper function to perform the recursive search for block splits.
+ * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
+ * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then
+ * we do not recurse.
+ *
+ * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
+ * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
+ * In practice, recursion depth usually doesn't go beyond 4.
+ *
+ * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current blockSize
+ * maximum of 128 KB, this value is actually impossible to reach.
+ */
+static void
+ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+ ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+{
+ seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
+ seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
+ seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
+ size_t estimatedOriginalSize;
+ size_t estimatedFirstHalfSize;
+ size_t estimatedSecondHalfSize;
+ size_t midIdx = (startIdx + endIdx)/2;
+
+ if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
+ DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
+ return;
+ }
+ DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
+ ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
+ estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
+ estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
+ estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
+ DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
+ estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
+ if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
+ return;
+ }
+ if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
+ ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
+ splits->splitLocations[splits->idx] = (U32)midIdx;
+ splits->idx++;
+ ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
+ }
+}
+
+/* Entry point for the recursive block-split search. Populates a table with intra-block partition indices that can improve compression ratio.
+ *
+ * Returns the number of splits made (which equals the size of the partition table - 1).
+ */
+static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
+ seqStoreSplits splits = {partitions, 0};
+ if (nbSeq <= 4) {
+ DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
+ /* Refuse to try and split anything with 4 or fewer sequences */
+ return 0;
+ }
+ ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
+ splits.splitLocations[splits.idx] = nbSeq;
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
+ return splits.idx;
+}
+
+/* ZSTD_compressBlock_splitBlock():
+ * Attempts to split a given block into multiple blocks to improve compression ratio.
+ *
+ * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
+ const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
+{
+ size_t cSize = 0;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ size_t i = 0;
+ size_t srcBytesTotal = 0;
+ U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
+ seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
+ seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
+ size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
+
+ /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
+ * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
+ * separate repcode histories that simulate repcode history on compression and decompression side,
+ * and use the histories to determine whether we must replace a particular repcode with its raw offset.
+ *
+ * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
+ * or RLE. This allows us to retrieve the offset value that an invalid repcode references within
+ * a nocompress/RLE block.
+ * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
+ * the replacement offset value rather than the original repcode to update the repcode history.
+ * dRep also will be the final repcode history sent to the next block.
+ *
+ * See ZSTD_seqStore_resolveOffCodes() for more details.
+ */
+ repcodes_t dRep;
+ repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
+
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ if (numSplits == 0) {
+ size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, blockSize,
+ lastBlock, 0 /* isPartition */);
+ FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
+ assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ return cSizeSingleBlock;
+ }
+
+ ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
+ for (i = 0; i <= numSplits; ++i) {
+ size_t srcBytes;
+ size_t cSizeChunk;
+ U32 const lastPartition = (i == numSplits);
+ U32 lastBlockEntireSrc = 0;
+
+ srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
+ srcBytesTotal += srcBytes;
+ if (lastPartition) {
+ /* This is the final partition, need to account for possible last literals */
+ srcBytes += blockSize - srcBytesTotal;
+ lastBlockEntireSrc = lastBlock;
+ } else {
+ ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
+ }
+
+ cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, srcBytes,
+ lastBlockEntireSrc, 1 /* isPartition */);
+ DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
+ FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
+
+ ip += srcBytes;
+ op += cSizeChunk;
+ dstCapacity -= cSizeChunk;
+ cSize += cSizeChunk;
+ *currSeqStore = *nextSeqStore;
+ assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ }
+ /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
+ * for the next block.
+ */
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 lastBlock)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ U32 nbSeq;
+ size_t cSize;
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
+ assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) {
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ return cSize;
+ }
+ nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
+ }
+
+ cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
+ FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 frame)
+{
+ /* This is the upper bound for the length of an rle block.
+ * This isn't the actual upper bound. Finding the real threshold
+ * needs further investigation.
+ */
+ const U32 rleMaxLength = 25;
+ size_t cSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ /* encode sequences and literals */
+ cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+
+ if (frame &&
+ /* We don't want to emit our first block as an RLE even if it qualifies, because
+ * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3.
+ */
+ !zc->isFirstBlock &&
+ cSize < rleMaxLength &&
+ ZSTD_isRLE(ip, srcSize))
+ {
+ cSize = 1;
+ op[0] = ip[0];
+ }
+
+out:
+ if (!ZSTD_isError(cSize) && cSize > 1) {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ }
+ /* We check that dictionaries have offset codes available for the first
+ * block. After the first block, the offcode table might not have large
+ * enough codes to represent the offsets in the data.
+ */
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const size_t bss, U32 lastBlock)
+{
+ DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
+ if (bss == ZSTDbss_compress) {
+ if (/* We don't want to emit our first block as an RLE even if it qualifies, because
+ * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3.
+ */
+ !zc->isFirstBlock &&
+ ZSTD_maybeRLE(&zc->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize))
+ {
+ return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
+ }
+ /* Attempt superblock compression.
+ *
+ * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
+ * standard ZSTD_compressBound(). This is a problem, because even if we have
+ * space now, taking an extra byte now could cause us to run out of space later
+ * and violate ZSTD_compressBound().
+ *
+ * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
+ *
+ * In order to respect ZSTD_compressBound() we must attempt to emit a raw
+ * uncompressed block in these cases:
+ * * cSize == 0: Return code for an uncompressed block.
+ * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
+ * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
+ * output space.
+ * * cSize >= blockBound(srcSize): We have expanded the block too much so
+ * emit an uncompressed block.
+ */
+ {
+ size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
+ if (cSize != ERROR(dstSize_tooSmall)) {
+ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
+ if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return cSize;
+ }
+ }
+ }
+ }
+
+ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
+ /* Superblock compression failed, attempt to emit a single no compress block.
+ * The decoder will be able to stream this block since it is uncompressed.
+ */
+ return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
+}
+
+static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock)
+{
+ size_t cSize = 0;
+ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+
+ cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ void const* ip,
+ void const* iend)
+{
+ U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const maxDist = (U32)1 << params->cParams.windowLog;
+ if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ ZSTD_reduceIndex(ms, params, correction);
+ ZSTD_cwksp_mark_tables_clean(ws);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ /* invalidate dictionaries on overflow correction */
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+}
+
+/*! ZSTD_compress_frameChunk() :
+* Compress a chunk of data into one or multiple blocks.
+* All blocks will be terminated, all input will be consumed.
+* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+* The frame is assumed to have already started (header already produced).
+* @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+{
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+
+ assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
+
+ DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+ xxh64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ dstSize_tooSmall,
+ "not enough space to store compressed block");
+ if (remaining < blockSize) blockSize = remaining;
+
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
+ ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+ ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+
+ /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
+ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+
+ { size_t cSize;
+ if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
+ assert(cSize > 0);
+ assert(cSize <= blockSize + ZSTD_blockHeaderSize);
+ } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
+ assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
+ } else {
+ cSize = ZSTD_compressBlock_internal(cctx,
+ op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
+ ip, blockSize, 1 /* frame */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
+
+ if (cSize == 0) { /* block is not compressible */
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ } else {
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cSize += ZSTD_blockHeaderSize;
+ }
+ }
+
+
+ ip += blockSize;
+ assert(remaining >= blockSize);
+ remaining -= blockSize;
+ op += cSize;
+ assert(dstCapacity >= cSize);
+ dstCapacity -= cSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+ (unsigned)cSize);
+ } }
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+ return (size_t)(op-ostart);
+}
+
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
+{ BYTE* const op = (BYTE*)dst;
+ U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
+ U32 const checksumFlag = params->fParams.checksumFlag>0;
+ U32 const windowSize = (U32)1 << params->cParams.windowLog;
+ U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+ BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params->fParams.contentSizeFlag ?
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ size_t pos=0;
+
+ assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
+ "dst buf is too small to fit worst-case frame header size.");
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+ if (params->format == ZSTD_f_zstd1) {
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
+ op[pos++] = frameHeaderDescriptionByte;
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+}
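+
+/* Illustrative frame header produced by the function above (example values):
+ * zstd1 format, dictID == 0, no checksum, contentSizeFlag set with
+ * pledgedSrcSize == 1000 and a window large enough for singleSegment.
+ * Then fcsCode == 1, the descriptor byte is (1<<5) + (1<<6) == 0x60, no
+ * windowLog byte is emitted, and the content size is stored as
+ * LE16(1000 - 256), for a total of 4 (magic) + 1 (descriptor) + 2 == 7 bytes. */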
+
+/* ZSTD_writeSkippableFrame() :
+ * Writes out a skippable frame with the specified magic number variant (16 are supported),
+ * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
+ *
+ * Returns the total number of bytes written, or a ZSTD error code.
+ */
+size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant) {
+ BYTE* op = (BYTE*)dst;
+ RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
+ dstSize_tooSmall, "Not enough room for skippable frame");
+ RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
+ RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
+
+ MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
+ MEM_writeLE32(op+4, (U32)srcSize);
+ ZSTD_memcpy(op+8, src, srcSize);
+ return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
+}
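+/* Byte-level sketch of the skippable frame emitted above (illustrative only) :
+ * for magicVariant==0 and srcSize==4, the output is
+ *   50 2A 4D 18 | 04 00 00 00 | <4 payload bytes>
+ * i.e. ZSTD_MAGIC_SKIPPABLE_START (0x184D2A50) written little-endian, then the
+ * LE32 payload size, then the payload verbatim ; decoders skip such frames. */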
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
+ "dst buf is too small to write frame trailer empty block.");
+ { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
+ MEM_writeLE24(dst, cBlockHeader24);
+ return ZSTD_blockHeaderSize;
+ }
+}
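+/* The 3-byte block header written above encodes : bit 0 = Last_Block, bits 1-2 =
+ * Block_Type (bt_raw here), bits 3-23 = Block_Size. With lastBlock==1, bt_raw
+ * and size 0, cBlockHeader24 == 1 and the bytes emitted are 01 00 00. */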
+
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
+ "wrong cctx stage");
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
+ parameter_unsupported,
+ "incompatible with ldm");
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.posInSequence = 0;
+ return 0;
+}
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+{
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ size_t fhSize = 0;
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+ cctx->stage, (unsigned)srcSize);
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+ "missing init (ZSTD_compressBegin)");
+
+ if (frame && (cctx->stage==ZSTDcs_init)) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ assert(fhSize <= dstCapacity);
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (!srcSize) return fhSize; /* do not generate an empty block if no input */
+
+ if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
+ ms->forceNonContiguous = 0;
+ ms->nextToUpdate = ms->window.dictLimit;
+ }
+ if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
+ }
+
+ if (!frame) {
+ /* overflow check and correction for block mode */
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams,
+ src, (BYTE const*)src + srcSize);
+ }
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ { size_t const cSize = frame ?
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
+ FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
+ cctx->consumedSrcSize += srcSize;
+ cctx->producedCSize += (cSize + fhSize);
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ RETURN_ERROR_IF(
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ return cSize + fhSize;
+ }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+}
+
+
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+ assert(!ZSTD_checkCParams(cParams));
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+ { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
+
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+}
+
+/*! ZSTD_loadDictionaryContent() :
+ * @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
+
+    /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
+ if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
+ * Dictionaries right at the edge will immediately trigger overflow
+ * correction, but I don't want to insert extra constraints here.
+ */
+ U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
+ /* We must have cleared our windows when our source is this large. */
+ assert(ZSTD_window_isEmpty(ms->window));
+ if (loadLdmDict)
+ assert(ZSTD_window_isEmpty(ls->window));
+ /* If the dictionary is too large, only load the suffix of the dictionary. */
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
+ }
+
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+ ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
+
+ if (loadLdmDict) {
+ ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
+ ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ }
+
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
+ ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
+
+ if (loadLdmDict)
+ ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
+
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, dtlm);
+ break;
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ assert(srcSize >= HASH_READ_SIZE);
+ if (ms->dedicatedDictSearch) {
+ assert(ms->chainTable != NULL);
+ ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
+ } else {
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ if (params->useRowMatchFinder == ZSTD_ps_enable) {
+ size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ZSTD_row_update(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using row-based hash table for lazy dict");
+ } else {
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using chain-based hash table for lazy dict");
+ }
+ }
+ break;
+
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ assert(srcSize >= HASH_READ_SIZE);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+ break;
+
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ ms->nextToUpdate = (U32)(iend - ms->window.base);
+ return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that actually show up cause problems
+ * during FSE encoding. Mark dictionaries containing zero-probability symbols as FSE_repeat_check;
+ * only dictionaries whose symbols are 100% valid can be assumed FSE_repeat_valid.
+ */
+static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
+{
+ U32 s;
+ if (dictMaxSymbolValue < maxSymbolValue) {
+ return FSE_repeat_check;
+ }
+ for (s = 0; s <= maxSymbolValue; ++s) {
+ if (normalizedCounter[s] == 0) {
+ return FSE_repeat_check;
+ }
+ }
+ return FSE_repeat_valid;
+}
+
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize)
+{
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
+ const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ dictPtr += 8;
+ bs->entropy.huf.repeatMode = HUF_repeat_check;
+
+ { unsigned maxSymbolValue = 255;
+ unsigned hasZeroWeights = 1;
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
+ dictEnd-dictPtr, &hasZeroWeights);
+
+ /* We only set the loaded table as valid if it contains all non-zero
+ * weights. Otherwise, we set it to check */
+ if (!hasZeroWeights)
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
+ dictPtr += hufHeaderSize;
+ }
+
+ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ /* fill all offset symbols to avoid garbage at end of table */
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ bs->rep[0] = MEM_readLE32(dictPtr+0);
+ bs->rep[1] = MEM_readLE32(dictPtr+4);
+ bs->rep[2] = MEM_readLE32(dictPtr+8);
+ dictPtr += 12;
+
+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+ }
+ /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
+ bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
+
+ /* All repCodes must be <= dictContentSize and != 0 */
+ { U32 u;
+ for (u=0; u<3; u++) {
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
+ } } }
+
+ return dictPtr - (const BYTE*)dict;
+}
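+/* Layout consumed by ZSTD_loadCEntropy() above, in parsing order : 4-byte magic,
+ * 4-byte dictID, Huffman table for literals, FSE table for offsets, FSE table for
+ * match lengths, FSE table for literal lengths, 3 x 4-byte repeat offsets, then
+ * the raw dictionary content. The return value is the number of header bytes
+ * consumed, i.e. the offset at which the content starts. */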
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ * assumptions : magic number supposed already checked
+ * dictSize supposed >= 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ size_t dictID;
+ size_t eSize;
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(dictSize >= 8);
+ assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
+
+ dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
+ eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
+ FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
+ dictPtr += eSize;
+
+ {
+ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
+ ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
+ }
+ return dictID;
+}
+
+/* ZSTD_compress_insertDictionary() :
+* @return : dictID, or an error code */
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ const ZSTD_CCtx_params* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+ if ((dict==NULL) || (dictSize<8)) {
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ return 0;
+ }
+
+ ZSTD_reset_compressedBlockState(bs);
+
+ /* dict restricted modes */
+ if (dictContentType == ZSTD_dct_rawContent)
+ return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
+
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_auto) {
+ DEBUGLOG(4, "raw content dictionary detected");
+ return ZSTD_loadDictionaryContent(
+ ms, ls, ws, params, dict, dictSize, dtlm);
+ }
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ assert(0); /* impossible */
+ }
+
+ /* dict as full zstd dictionary */
+ return ZSTD_loadZstdDictionary(
+ bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+}
+
+#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
+#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
+
+/*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
+ DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
+ /* params are supposed to be fully validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if ( (cdict)
+ && (cdict->dictContentSize > 0)
+ && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0)
+ && (params->attachDictPref != ZSTD_dictForceLoad) ) {
+ return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ dictContentSize,
+ ZSTDcrp_makeClean, zbuff) , "");
+ { size_t const dictID = cdict ?
+ ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
+ cdict->dictContentSize, cdict->dictContentType, dtlm,
+ cctx->entropyWorkspace)
+ : ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
+ dictContentType, dtlm, cctx->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= UINT_MAX);
+ cctx->dictID = (U32)dictID;
+ cctx->dictContentSize = dictContentSize;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
+ /* compression parameters verification and optimization */
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
+ return ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, dictContentType, dtlm,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+* @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compressBegin_advanced_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ NULL /*cdict*/,
+ &cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_CCtx_params cctxParams;
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+}
+
+
+/*! ZSTD_writeEpilogue() :
+* Ends a frame.
+* @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+}
+
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
+{
+ (void)cctx;
+ (void)extraCSize;
+}
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t endResult;
+ size_t const cSize = ZSTD_compressContinue_internal(cctx,
+ dst, dstCapacity, src, srcSize,
+ 1 /* frame mode */, 1 /* last chunk */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ DEBUGLOG(4, "end of frame : controlling src size");
+ RETURN_ERROR_IF(
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ ZSTD_CCtx_trace(cctx, endResult);
+ return cSize + endResult;
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced");
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ &cctx->simpleApiParams);
+}
+
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ params, srcSize, ZSTDb_not_buffered) , "");
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel)
+{
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
+ assert(params.fParams.contentSizeFlag == 1);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
+}
+
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+ assert(cctx != NULL);
+ return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ size_t result;
+ ZSTD_CCtx* cctx = ZSTD_createCCtx();
+ RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
+ result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_freeCCtx(cctx);
+ return result;
+}
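+/* Minimal caller sketch for the one-shot API above (illustrative ; buffer names
+ * are placeholders, error handling elided) :
+ *   size_t const bound = ZSTD_compressBound(srcSize);
+ *   size_t const cSize = ZSTD_compress(dstBuf, bound, srcBuf, srcSize, 3);
+ *   if (ZSTD_isError(cSize)) { ... }
+ * Sizing dst with ZSTD_compressBound() ensures dstCapacity is never the limiting
+ * factor ; compression level 0 maps to ZSTD_CLEVEL_DEFAULT. */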
+
+
+/* ===== Dictionary API ===== */
+
+/*! ZSTD_estimateCDictSize_advanced() :
+ *  Estimate the amount of memory needed to create a dictionary with the following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
+ return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
+ * in case we are using DDS with row-hash. */
+ + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
+ /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
+}
+
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support sizeof on NULL */
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
+ /* cdict may be in the workspace */
+ return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ + ZSTD_cwksp_sizeof(&cdict->workspace);
+}
+
+static size_t ZSTD_initCDict_internal(
+ ZSTD_CDict* cdict,
+ const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_CCtx_params params)
+{
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
+ assert(!ZSTD_checkCParams(params.cParams));
+ cdict->matchState.cParams = params.cParams;
+ cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+ cdict->dictContent = dictBuffer;
+ } else {
+ void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
+ cdict->dictContent = internalBuffer;
+ ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
+ }
+ cdict->dictContentSize = dictSize;
+ cdict->dictContentType = dictContentType;
+
+ cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
+
+
+ /* Reset the state to no dictionary */
+ ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &cdict->matchState,
+ &cdict->workspace,
+ &params.cParams,
+ params.useRowMatchFinder,
+ ZSTDcrp_makeClean,
+ ZSTDirp_reset,
+ ZSTD_resetTarget_CDict), "");
+ /* (Maybe) load the dictionary
+ * Skips loading the dictionary if it is < 8 bytes.
+ */
+ { params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ params.fParams.contentSizeFlag = 1;
+ { size_t const dictID = ZSTD_compress_insertDictionary(
+ &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
+ &params, cdict->dictContent, cdict->dictContentSize,
+ dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= (size_t)(U32)-1);
+ cdict->dictID = (U32)dictID;
+ }
+ }
+
+ return 0;
+}
+
+static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams,
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ U32 enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { size_t const workspaceSize =
+ ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
+ ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
+ void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
+ ZSTD_cwksp ws;
+ ZSTD_CDict* cdict;
+
+ if (!workspace) {
+ ZSTD_customFree(workspace, customMem);
+ return NULL;
+ }
+
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
+
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ assert(cdict != NULL);
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ cdict->customMem = customMem;
+ cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
+ cdict->useRowMatchFinder = useRowMatchFinder;
+ return cdict;
+ }
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ ZSTD_CCtxParams_init(&cctxParams, 0);
+ cctxParams.cParams = cParams;
+ cctxParams.customMem = customMem;
+ return ZSTD_createCDict_advanced2(
+ dictBuffer, dictSize,
+ dictLoadMethod, dictContentType,
+ &cctxParams, customMem);
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* originalCctxParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams = *originalCctxParams;
+ ZSTD_compressionParameters cParams;
+ ZSTD_CDict* cdict;
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ if (cctxParams.enableDedicatedDictSearch) {
+ cParams = ZSTD_dedicatedDictSearch_getCParams(
+ cctxParams.compressionLevel, dictSize);
+ ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
+ } else {
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
+ /* Fall back to non-DDSS params */
+ cctxParams.enableDedicatedDictSearch = 0;
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
+ cctxParams.cParams = cParams;
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+
+ cdict = ZSTD_createCDict_advanced_internal(dictSize,
+ dictLoadMethod, cctxParams.cParams,
+ cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
+ customMem);
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ cctxParams) )) {
+ ZSTD_freeCDict(cdict);
+ return NULL;
+ }
+
+ return cdict;
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
+
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byRef, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = cdict->customMem;
+ int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
+ ZSTD_cwksp_free(&cdict->workspace, cMem);
+ if (!cdictInWorkspace) {
+ ZSTD_customFree(cdict, cMem);
+ }
+ return 0;
+ }
+}
+
+/*! ZSTD_initStaticCDict() :
+ *  Generate a digested dictionary in the provided memory area.
+ *  workspace: The memory area to emplace the dictionary into.
+ *             Provided pointer must be 8-bytes aligned.
+ *             It must outlive dictionary usage.
+ *  workspaceSize: Use ZSTD_estimateCDictSize()
+ *                 to determine how large workspace must be.
+ *  cParams : use ZSTD_getCParams() to transform a compression level
+ *            into its relevant cParams.
+ * @return : pointer to ZSTD_CDict, or NULL if error (size too small)
+ *  Note : there is no corresponding "free" function.
+ *         Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
+ /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
+ size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ + matchStateSize;
+ ZSTD_CDict* cdict;
+ ZSTD_CCtx_params params;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+
+ {
+ ZSTD_cwksp ws;
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ if (cdict == NULL) return NULL;
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ }
+
+ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
+ if (workspaceSize < neededSize) return NULL;
+
+ ZSTD_CCtxParams_init(&params, 0);
+ params.cParams = cParams;
+ params.useRowMatchFinder = useRowMatchFinder;
+ cdict->useRowMatchFinder = useRowMatchFinder;
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ params) ))
+ return NULL;
+
+ return cdict;
+}
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+ assert(cdict != NULL);
+ return cdict->matchState.cParams;
+}
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0;
+ return cdict->dictID;
+}
+
+/* ZSTD_compressBegin_usingCDict_internal() :
+ * Implementation of various ZSTD_compressBegin_usingCDict* functions.
+ */
+static size_t ZSTD_compressBegin_usingCDict_internal(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
+ /* Initialize the cctxParams from the cdict */
+ {
+ ZSTD_parameters params;
+ params.fParams = fParams;
+ params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0 ) ?
+ ZSTD_getCParamsFromCDict(cdict)
+ : ZSTD_getCParams(cdict->compressionLevel,
+ pledgedSrcSize,
+ cdict->dictContentSize);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
+ }
+ /* Increase window log to fit the entire dictionary and source if the
+ * source size is known. Limit the increase to 19, which is the
+ * window log for compression level 1 with the largest source size.
+ */
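+    /* Worked example (illustrative only) : pledgedSrcSize = 100 KB gives
+     * limitedSrcSize = 102400 and limitedSrcLog = 17, so windowLog is raised
+     * to at least 17 ; sources of 512 KB or more hit the cap of 19. */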
+ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+ U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+ cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
+ }
+ return ZSTD_compressBegin_internal(cctx,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ cdict,
+ &cctxParams, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * This function is DEPRECATED.
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
+}
+
+/* ZSTD_compressBegin_usingCDict() :
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+/*! ZSTD_compress_usingCDict_internal():
+ * Implementation of various ZSTD_compress_usingCDict* functions.
+ */
+static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+/*! ZSTD_compress_usingCDict_advanced():
+ * This function is DEPRECATED.
+ */
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ * Note that compression parameters are decided at CDict creation time
+ * while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
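+/* Typical reuse pattern for the CDict API above (sketch only ; names are placeholders) :
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, level);
+ *   for (i = 0; i < nbInputs; i++)
+ *       ZSTD_compress_usingCDict(cctx, dst[i], dstCapacity[i], src[i], srcSize[i], cdict);
+ *   ZSTD_freeCDict(cdict);
+ * The dictionary is digested once at creation, keeping per-call startup cost low. */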
+
+
+
+/* ******************************************************************
+* Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createCStream");
+ return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{ /* CStream and CCtx are now same object */
+ return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+ return ZSTD_freeCCtx(zcs); /* same object */
+}
+
+
+
+/*====== Initialization ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+ return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
+}
+
+static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+{
+ if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
+ return ZSTD_cpm_attachDict;
+ else
+ return ZSTD_cpm_noAttachDict;
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+/*! ZSTD_initCStream_internal() :
+ * Note : for lib/compress only. Used by zstdmt_compress.c.
+ * Assumption 1 : params are valid
+ * Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ zcs->requestedParams = *params;
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if (dict) {
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ } else {
+ /* Dictionary is cleared if !cdict */
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ }
+ return 0;
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ zcs->requestedParams.fParams = fParams;
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pss)
+{
+ /* for compatibility with older programs relying on this behavior.
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+ * This line will be removed in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ return 0;
+}
+
+/*====== Compression ======*/
+
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSize;
+ return hintInSize;
+}
+
+/* ZSTD_compressStream_generic():
+ * internal function for all *compressStream*() variants
+ *  static in this tree (the zstdmt_compress.c caller is not included here)
+ * @return : hint size for next input */
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective const flushMode)
+{
+ const char* const istart = (const char*)input->src;
+ const char* const iend = input->size != 0 ? istart + input->size : istart;
+ const char* ip = input->pos != 0 ? istart + input->pos : istart;
+ char* const ostart = (char*)output->dst;
+ char* const oend = output->size != 0 ? ostart + output->size : ostart;
+ char* op = output->pos != 0 ? ostart + output->pos : ostart;
+ U32 someMoreWork = 1;
+
+ /* check expectations */
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->inBuff != NULL);
+ assert(zcs->inBuffSize > 0);
+ }
+ if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->outBuff != NULL);
+ assert(zcs->outBuffSize > 0);
+ }
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+ assert((U32)flushMode <= (U32)ZSTD_e_end);
+
+ while (someMoreWork) {
+ switch(zcs->streamStage)
+ {
+ case zcss_init:
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
+
+ case zcss_load:
+ if ( (flushMode == ZSTD_e_end)
+ && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
+ && (zcs->inBuffPos == 0) ) {
+ /* shortcut to compression pass directly into output buffer */
+ size_t const cSize = ZSTD_compressEnd(zcs,
+ op, oend-op, ip, iend-ip);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
+ ip = iend;
+ op += cSize;
+ zcs->frameEnded = 1;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ someMoreWork = 0; break;
+ }
+ /* complete loading into inBuffer in buffered mode */
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(
+ zcs->inBuff + zcs->inBuffPos, toLoad,
+ ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ if (loaded != 0)
+ ip += loaded;
+ if ( (flushMode == ZSTD_e_continue)
+ && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+ /* not enough input to fill full block : stop here */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (zcs->inBuffPos == zcs->inToCompress) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
+ }
+ /* compress current block (note : this stage cannot be stopped in the middle) */
+ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
+ { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ void* cDst;
+ size_t cSize;
+ size_t oSize = oend-op;
+ size_t const iSize = inputBuffered
+ ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSize);
+ if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
+ cDst = op; /* compress into output buffer, to skip flush stage */
+ else
+ cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+ if (inputBuffered) {
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize);
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
+ if (!lastBlock)
+ assert(zcs->inBuffTarget <= zcs->inBuffSize);
+ zcs->inToCompress = zcs->inBuffPos;
+ } else {
+ unsigned const lastBlock = (ip + iSize == iend);
+ assert(flushMode == ZSTD_e_end /* Already validated */);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ /* Consume the input prior to error checking to mirror buffered mode. */
+ if (iSize > 0)
+ ip += iSize;
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ if (lastBlock)
+ assert(ip == iend);
+ }
+ if (cDst == op) { /* no need to flush */
+ op += cSize;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed directly in outBuffer");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ }
+ break;
+ }
+ zcs->outBuffContentSize = cSize;
+ zcs->outBuffFlushedSize = 0;
+ zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+ }
+ ZSTD_FALLTHROUGH;
+ case zcss_flush:
+ DEBUGLOG(5, "flush stage");
+ assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
+ zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
+ if (flushed)
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ if (toFlush!=flushed) {
+ /* flush not fully completed, presumably because dst is too small */
+ assert(op==oend);
+ someMoreWork = 0;
+ break;
+ }
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed on flush");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ break;
+ }
+ zcs->streamStage = zcss_load;
+ break;
+ }
+
+ default: /* impossible */
+ assert(0);
+ }
+ }
+
+ input->pos = ip - istart;
+ output->pos = op - ostart;
+ if (zcs->frameEnded) return 0;
+ return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+ return ZSTD_nextInputSizeHint(cctx);
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
+ return ZSTD_nextInputSizeHint_MTorST(zcs);
+}
+
+/* After a compression call, set the expected input/output buffers.
+ * This is validated at the start of the next compression call.
+ */
+static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ cctx->expectedInBuffer = *input;
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ cctx->expectedOutBufferSize = output->size - output->pos;
+ }
+}
+
+/* Validate that the input/output buffers match the expectations set by
+ * ZSTD_setBufferExpectations.
+ */
+static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
+ ZSTD_outBuffer const* output,
+ ZSTD_inBuffer const* input,
+ ZSTD_EndDirective endOp)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ ZSTD_inBuffer const expect = cctx->expectedInBuffer;
+ if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
+ if (endOp != ZSTD_e_end)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ size_t const outBufferSize = output->size - output->pos;
+ if (cctx->expectedOutBufferSize != outBufferSize)
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ }
+ return 0;
+}
+
+static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+ ZSTD_EndDirective endOp,
+ size_t inSize) {
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ if (cctx->cdict && !cctx->localDict.cdict) {
+ /* Let the cdict's compression level take priority over the requested params.
+ * But do not take the cdict's compression level if the "cdict" is actually a localDict
+ * generated from ZSTD_initLocalDict().
+ */
+ params.compressionLevel = cctx->cdict->compressionLevel;
+ }
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
+ {
+ size_t const dictSize = prefixDict.dict
+ ? prefixDict.dictSize
+ : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
+ ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ &params, cctx->pledgedSrcSizePlusOne-1,
+ dictSize, mode);
+ }
+
+ params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
+ params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
+ params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
+
+ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
+ cctx->cdict,
+ &params, pledgedSrcSize,
+ ZSTDb_buffered) , "");
+ assert(cctx->appliedParams.nbWorkers == 0);
+ cctx->inToCompress = 0;
+ cctx->inBuffPos = 0;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ /* for small input: avoid automatic flush on reaching end of block, since
+             * it would require adding a 3-byte null block to end the frame
+ */
+ cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ } else {
+ cctx->inBuffTarget = 0;
+ }
+ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->frameEnded = 0;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
+ /* check conditions */
+ RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
+ RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
+ RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
+ assert(cctx != NULL);
+
+ /* transparent initialization stage */
+ if (cctx->streamStage == zcss_init) {
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ }
+ /* end of transparent initialization stage */
+
+ FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
+ /* compression stage */
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
+ DEBUGLOG(5, "completed ZSTD_compressStream2");
+ ZSTD_setBufferExpectations(cctx, output, input);
+ return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
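+
+/* Editor's illustrative sketch (not part of the upstream zstd patch): a minimal streaming
+ * loop built on ZSTD_compressStream2(), assuming a caller-provided cctx and a hypothetical
+ * write_out() sink. Kept under #if 0 since it is documentation only.
+ */
+#if 0
+static size_t example_stream_compress(ZSTD_CCtx* cctx,
+                                      const void* src, size_t srcSize,
+                                      void* dst, size_t dstCapacity)
+{
+    ZSTD_inBuffer input = { src, srcSize, 0 };
+    size_t remaining;
+    do {
+        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+        /* ZSTD_e_end flushes and writes the frame epilogue; returns bytes still to flush */
+        remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+        if (ZSTD_isError(remaining)) return remaining;
+        write_out(dst, output.pos);   /* hypothetical sink for the produced bytes */
+    } while (remaining != 0);
+    return 0;
+}
+#endif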
+
+size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
+ ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
+ DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ /* Enable stable input/output buffers. */
+ cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
+ cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
+ { size_t oPos = 0;
+ size_t iPos = 0;
+ size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+ dst, dstCapacity, &oPos,
+ src, srcSize, &iPos,
+ ZSTD_e_end);
+ /* Reset to the original values. */
+ cctx->requestedParams.inBufferMode = originalInBufferMode;
+ cctx->requestedParams.outBufferMode = originalOutBufferMode;
+ FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
+ if (result != 0) { /* compression not completed, due to lack of output space */
+ assert(oPos == dstCapacity);
+ RETURN_ERROR(dstSize_tooSmall, "");
+ }
+        assert(iPos == srcSize); /* all input is expected to be consumed */
+ return oPos;
+ }
+}
+
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
+/* ZSTD_validateSequence() :
+ * @offCode : is presumed to follow the format required by ZSTD_storeSeq()
+ * @returns a ZSTD error code if the sequence is not valid
+ */
+static size_t
+ZSTD_validateSequence(U32 offCode, U32 matchLength,
+ size_t posInSrc, U32 windowLog, size_t dictSize)
+{
+ U32 const windowSize = 1 << windowLog;
+ /* posInSrc represents the amount of data the decoder would decode up to this point.
+ * As long as the amount of data decoded is less than or equal to window size, offsets may be
+ * larger than the total length of output decoded in order to reference the dict, even larger than
+ * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
+ */
+ size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+ RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
+ RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
+ return 0;
+}
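+
+/* Worked example (editor's note, not part of upstream zstd): with windowLog == 17
+ * (128 KB window), posInSrc == 1000 and dictSize == 0, posInSrc <= windowSize, so
+ * offsetBound == posInSrc + dictSize == 1000 and any offCode above STORE_OFFSET(1000)
+ * is rejected as corruption. Once posInSrc exceeds 128 KB, offsets are capped at the
+ * window size instead.
+ */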
+
+/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+{
+ U32 offCode = STORE_OFFSET(rawOffset);
+
+ if (!ll0 && rawOffset == rep[0]) {
+ offCode = STORE_REPCODE_1;
+ } else if (rawOffset == rep[1]) {
+ offCode = STORE_REPCODE(2 - ll0);
+ } else if (rawOffset == rep[2]) {
+ offCode = STORE_REPCODE(3 - ll0);
+ } else if (ll0 && rawOffset == rep[0] - 1) {
+ offCode = STORE_REPCODE_3;
+ }
+ return offCode;
+}
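+
+/* Worked example (editor's note, not part of upstream zstd): with the repcode history
+ * rep = {8, 4, 2} and litLength != 0 (ll0 == 0):
+ *   rawOffset ==  8  -> STORE_REPCODE_1
+ *   rawOffset ==  4  -> STORE_REPCODE(2)
+ *   rawOffset == 13  -> STORE_OFFSET(13), i.e. no matching repcode
+ * With ll0 == 1, rawOffset == rep[0] - 1 == 7 maps to STORE_REPCODE_3 instead.
+ */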
+
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ */
+static size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize)
+{
+ U32 idx = seqPos->idx;
+ BYTE const* ip = (BYTE const*)(src);
+ const BYTE* const iend = ip + blockSize;
+ repcodes_t updatedRepcodes;
+ U32 dictSize;
+
+ if (cctx->cdict) {
+ dictSize = (U32)cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = (U32)cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+    /* check idx against inSeqsSize before dereferencing inSeqs[idx] */
+    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
+ U32 const litLength = inSeqs[idx].litLength;
+ U32 const ll0 = (litLength == 0);
+ U32 const matchLength = inSeqs[idx].matchLength;
+ U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize),
+ "Sequence validation failed");
+ }
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ip += matchLength + litLength;
+ }
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ if (inSeqs[idx].litLength) {
+ DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
+ ip += inSeqs[idx].litLength;
+ seqPos->posInSrc += inSeqs[idx].litLength;
+ }
+ RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ seqPos->idx = idx+1;
+ return 0;
+}
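+
+/* Editor's illustrative sketch (not part of upstream zstd): in explicit-block-delimiter
+ * mode each block of sequences is terminated by an entry with offset == 0 and
+ * matchLength == 0. For a 9-byte block "abcabcabc" a hypothetical caller could pass:
+ *   { .offset = 3, .litLength = 3, .matchLength = 6, .rep = 0 }   three literals then a match
+ *   { .offset = 0, .litLength = 0, .matchLength = 0, .rep = 0 }   block delimiter
+ * The literal plus match lengths must add up to exactly blockSize, otherwise the
+ * function above returns corruption_detected.
+ */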
+
+/* Returns the number of bytes to move the current read position back by, which is only
+ * non-zero if we ended up splitting a sequence. It may also return a ZSTD error if
+ * something went wrong.
+ *
+ * This function attempts to scan through blockSize bytes represented by the sequences
+ * in inSeqs, storing any (partial) sequences.
+ *
+ * Occasionally, we may want to reduce the number of bytes actually consumed from inSeqs
+ * to avoid splitting a match, or to avoid producing a split match smaller than MINMATCH.
+ * In that case, we return the number of bytes that we did not read from this block.
+ */
+static size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize)
+{
+ U32 idx = seqPos->idx;
+ U32 startPosInSequence = seqPos->posInSequence;
+ U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
+ size_t dictSize;
+ BYTE const* ip = (BYTE const*)(src);
+ BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ repcodes_t updatedRepcodes;
+ U32 bytesAdjustment = 0;
+ U32 finalMatchSplit = 0;
+
+ if (cctx->cdict) {
+ dictSize = cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
+ const ZSTD_Sequence currSeq = inSeqs[idx];
+ U32 litLength = currSeq.litLength;
+ U32 matchLength = currSeq.matchLength;
+ U32 const rawOffset = currSeq.offset;
+ U32 offCode;
+
+ /* Modify the sequence depending on where endPosInSequence lies */
+ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
+ if (startPosInSequence >= litLength) {
+ startPosInSequence -= litLength;
+ litLength = 0;
+ matchLength -= startPosInSequence;
+ } else {
+ litLength -= startPosInSequence;
+ }
+ /* Move to the next sequence */
+ endPosInSequence -= currSeq.litLength + currSeq.matchLength;
+ startPosInSequence = 0;
+ idx++;
+ } else {
+ /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
+ does not reach the end of the match. So, we have to split the sequence */
+ DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
+ currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
+ if (endPosInSequence > litLength) {
+ U32 firstHalfMatchLength;
+ litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
+ firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
+ if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
+ /* Only ever split the match if it is larger than the block size */
+ U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
+ if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
+                        /* Move the endPosInSequence backward so that it creates a match of minMatch length */
+ endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ firstHalfMatchLength -= bytesAdjustment;
+ }
+ matchLength = firstHalfMatchLength;
+ /* Flag that we split the last match - after storing the sequence, exit the loop,
+ but keep the value of endPosInSequence */
+ finalMatchSplit = 1;
+ } else {
+                /* Move the position in sequence backwards so that we don't split the match, and break to store
+                 * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
+                 * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
+                 * would cause the first half of the match to be too small.
+                 */
+ bytesAdjustment = endPosInSequence - currSeq.litLength;
+ endPosInSequence = currSeq.litLength;
+ break;
+ }
+ } else {
+ /* This sequence ends inside the literals, break to store the last literals */
+ break;
+ }
+ }
+ /* Check if this offset can be represented with a repcode */
+ { U32 const ll0 = (litLength == 0);
+ offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ }
+
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize),
+ "Sequence validation failed");
+ }
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ip += matchLength + litLength;
+ }
+ DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
+ seqPos->idx = idx;
+ seqPos->posInSequence = endPosInSequence;
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ iend -= bytesAdjustment;
+ if (ip != iend) {
+ /* Store any last literals */
+ U32 lastLLSize = (U32)(iend - ip);
+ assert(ip <= iend);
+ DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
+ seqPos->posInSrc += lastLLSize;
+ }
+
+ return bytesAdjustment;
+}
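+
+/* Worked example (editor's note, not part of upstream zstd): suppose blockSize == 100
+ * and the current sequence has litLength == 90 and matchLength == 50. endPosInSequence
+ * (100) lands inside the match, but matchLength is not larger than blockSize, so the
+ * match is not split: bytesAdjustment becomes 100 - 90 == 10, the 90 literals are stored
+ * as last literals, and 10 is returned so the caller shrinks this block by 10 bytes and
+ * the match is re-read at the start of the next block.
+ */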
+
+typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize);
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+{
+ ZSTD_sequenceCopier sequenceCopier = NULL;
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ if (mode == ZSTD_sf_explicitBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
+ } else if (mode == ZSTD_sf_noBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ }
+ assert(sequenceCopier != NULL);
+ return sequenceCopier;
+}
+
+/* Compress, block-by-block, all of the sequences given.
+ *
+ * Returns the cumulative size of all compressed blocks (including their headers),
+ * or a ZSTD error otherwise.
+ */
+static size_t
+ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize)
+{
+ size_t cSize = 0;
+ U32 lastBlock;
+ size_t blockSize;
+ size_t compressedSeqsSize;
+ size_t remaining = srcSize;
+ ZSTD_sequencePosition seqPos = {0, 0, 0};
+
+ BYTE const* ip = (BYTE const*)src;
+ BYTE* op = (BYTE*)dst;
+ ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+
+ DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
+ /* Special case: empty frame */
+ if (remaining == 0) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (remaining) {
+ size_t cBlockSize;
+ size_t additionalByteAdjustment;
+ lastBlock = remaining <= cctx->blockSize;
+ blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ ZSTD_resetSeqStore(&cctx->seqStore);
+ DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
+
+ additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
+ FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
+ blockSize -= additionalByteAdjustment;
+
+ /* If blocks are too small, emit as a nocompress block */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ cSize += cBlockSize;
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ continue;
+ }
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ blockSize,
+ cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+
+ if (!cctx->isFirstBlock &&
+ ZSTD_maybeRLE(&cctx->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+            /* We don't want to emit our first block as an RLE block even if it qualifies, because
+            * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+            * This is only an issue for zstd <= v1.4.3.
+ */
+ compressedSeqsSize = 1;
+ }
+
+ if (compressedSeqsSize == 0) {
+ /* ZSTD_noCompressBlock writes the block header as well */
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ } else if (compressedSeqsSize == 1) {
+ cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ } else {
+ U32 cBlockHeader;
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write block header into beginning of block*/
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ DEBUGLOG(4, "cSize running total: %zu", cSize);
+
+ if (lastBlock) {
+ break;
+ } else {
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ }
+ }
+
+ return cSize;
+}
+
+size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize)
+{
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+ size_t compressedBlocksSize = 0;
+ size_t frameHeaderSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(3, "ZSTD_compressSequences()");
+ assert(cctx != NULL);
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+ /* Begin writing output, starting with frame header */
+ frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
+ xxh64_update(&cctx->xxhState, src, srcSize);
+ }
+ /* cSize includes block header size and compressed sequences size */
+ compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ src, srcSize);
+ FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
+ cSize += compressedBlocksSize;
+ dstCapacity -= compressedBlocksSize;
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32((char*)dst + cSize, checksum);
+ cSize += 4;
+ }
+
+ DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ return cSize;
+}
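+
+/* Editor's illustrative sketch (not part of the upstream patch): a hypothetical caller of
+ * ZSTD_compressSequences(), assuming the sequence-compression parameters
+ * (ZSTD_c_blockDelimiters, ZSTD_c_validateSequences) are exposed for this cctx and that
+ * seqs/nbSeqs describe src using explicit block delimiters. Kept under #if 0 as
+ * documentation only.
+ */
+#if 0
+static size_t example_compress_sequences(ZSTD_CCtx* cctx,
+                                         void* dst, size_t dstCapacity,
+                                         const ZSTD_Sequence* seqs, size_t nbSeqs,
+                                         const void* src, size_t srcSize)
+{
+    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
+    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
+}
+#endif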
+
+/*====== Finalize ======*/
+
+/*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+ FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
+ if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
+ /* single thread mode : attempt to calculate remaining to flush more precisely */
+ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+ size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
+ size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
+ return toFlush;
+ }
+}
+
+
+/*-===== Pre-defined compression levels =====-*/
+#include "clevels.h"
+
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
+int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
+ switch (cParams.strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+ return cParams;
+}
+
+static int ZSTD_dedicatedDictSearch_isSupported(
+ ZSTD_compressionParameters const* cParams)
+{
+ return (cParams->strategy >= ZSTD_greedy)
+ && (cParams->strategy <= ZSTD_lazy2)
+ && (cParams->hashLog > cParams->chainLog)
+ && (cParams->chainLog <= 24);
+}
+
+/*
+ * Reverses the adjustment applied to cparams when enabling dedicated dict
+ * search. This is used to recover the params set to be used in the working
+ * context. (Otherwise, those tables would also grow.)
+ */
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams) {
+ switch (cParams->strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
+ if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
+ cParams->hashLog = ZSTD_HASHLOG_MIN;
+ }
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+}
+
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ case ZSTD_cpm_createCDict:
+ break;
+ case ZSTD_cpm_attachDict:
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
+ size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
+ return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
+ }
+}
+
+/*! ZSTD_getCParams_internal() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
+ * Use dictSize == 0 for unknown or unused.
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
+ int row;
+ DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
+
+ /* row */
+ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
+ else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ else row = compressionLevel;
+
+ { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+ DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
+ /* acceleration factor */
+ if (compressionLevel < 0) {
+ int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
+ cp.targetLength = (unsigned)(-clampedCompressionLevel);
+ }
+ /* refine parameters based on srcSize & dictSize */
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
+ }
+}
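+
+/* Worked example (editor's note, not part of upstream zstd): for compressionLevel == 3,
+ * srcSizeHint == 100 KB, dictSize == 0 and mode == ZSTD_cpm_unknown, rSize == 100 KB, so
+ * tableID == 1 + 1 + 0 == 2 (the "<= 128 KB" table) and row == 3; the selected parameters
+ * are then ZSTD_defaultCParameters[2][3] from clevels.h, before being refined by
+ * ZSTD_adjustCParams_internal().
+ */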
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
+
+/*! ZSTD_getParams_internal() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+ ZSTD_parameters params;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
+ DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+ ZSTD_memset(&params, 0, sizeof(params));
+ params.cParams = cParams;
+ params.fParams.contentSizeFlag = 1;
+ return params;
+}
+
+/*! ZSTD_getParams() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
new file mode 100644
index 000000000000..71697a11ae30
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_internal.h
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+#include "zstd_cwksp.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define kSearchStrength 8
+#define HASH_READ_SIZE 8
+#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
+ It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
+ It's not a big deal though : candidate will just be sorted again.
+ Additionally, candidate position 1 will be lost.
+ But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+ This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
+ HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+ ZSTD_hufCTables_t huf;
+ ZSTD_fseCTables_t fse;
+} ZSTD_entropyCTables_t;
+
+/* *********************************************
+* Entropy buffer statistics structs and funcs *
+***********************************************/
+/* ZSTD_hufCTablesMetadata_t :
+ * Stores Literals Block Type for a super-block in hType, and
+ * huffman tree description in hufDesBuffer.
+ * hufDesSize refers to the size of huffman tree description in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
+typedef struct {
+ symbolEncodingType_e hType;
+ BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+ size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/* ZSTD_fseCTablesMetadata_t :
+ * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ * fse tables in fseTablesBuffer.
+ * fseTablesSize refers to the size of fse tables in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
+typedef struct {
+ symbolEncodingType_e llType;
+ symbolEncodingType_e ofType;
+ symbolEncodingType_e mlType;
+ BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+ size_t fseTablesSize;
+  size_t lastCountSize; /* This is to account for a bug in v1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+ ZSTD_hufCTablesMetadata_t hufMetadata;
+ ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * @return : 0 on success or error code */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize);
+
+/* *******************************
+* Compression internals structs *
+*********************************/
+
+typedef struct {
+ U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
+ U32 len; /* Raw length of match */
+} ZSTD_match_t;
+
+typedef struct {
+ U32 offset; /* Offset of sequence */
+ U32 litLength; /* Length of literals prior to match */
+ U32 matchLength; /* Raw length of match */
+} rawSeq;
+
+typedef struct {
+ rawSeq* seq; /* The start of the sequences */
+ size_t pos; /* The index in seq where reading stopped. pos <= size. */
+ size_t posInSequence; /* The position within the sequence at seq[pos] where reading
+ stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
+ size_t size; /* The number of sequences. <= capacity. */
+ size_t capacity; /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+
+typedef struct {
+ int price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
+typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+ U32 matchLengthSum; /* nb of matchLength codes */
+ U32 offCodeSum; /* nb of offset codes */
+ U32 litSumBasePrice; /* to compare to log2(litfreq) */
+ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
+ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
+ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
+ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
+ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_paramSwitch_e literalCompressionMode;
+} optState_t;
+
+typedef struct {
+ ZSTD_entropyCTables_t entropy;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more valid data */
+ U32 nbOverflowCorrections; /* Number of times overflow correction has run since
+ * ZSTD_window_init(). Useful for debugging coredumps
+ * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
+ */
+} ZSTD_window_t;
+
+#define ZSTD_WINDOW_START_INDEX 2
+
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+
+#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
+
+struct ZSTD_matchState_t {
+ ZSTD_window_t window; /* State for window round buffer management */
+ U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+ * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
+ * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+ * When dict referential is copied into active context (i.e. not attached),
+ * loadedDictEnd == dictSize, since referential starts from zero.
+ */
+ U32 nextToUpdate; /* index from which to continue table update */
+ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+
+ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
+ U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+
+ U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+
+ int dedicatedDictSearch; /* Indicates whether this matchState is using the
+ * dedicated dictionary search structure.
+ */
+ optState_t opt; /* optimal parser state */
+ const ZSTD_matchState_t* dictMatchState;
+ ZSTD_compressionParameters cParams;
+ const rawSeqStore_t* ldmSeqStore;
+};
+
+typedef struct {
+ ZSTD_compressedBlockState_t* prevCBlock;
+ ZSTD_compressedBlockState_t* nextCBlock;
+ ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+ BYTE const* split;
+ U32 hash;
+ U32 checksum;
+ ldmEntry_t* bucket;
+} ldmMatchCandidate_t;
+
+#define LDM_BATCH_SIZE 64
+
+typedef struct {
+ ZSTD_window_t window; /* State for the window round buffer management */
+ ldmEntry_t* hashTable;
+ U32 loadedDictEnd;
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ size_t splitIndices[LDM_BATCH_SIZE];
+ ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
+} ldmState_t;
+
+typedef struct {
+ ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
+ U32 hashRateLog; /* Log number of entries to skip */
+ U32 windowLog; /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
+ int collectSequences;
+ ZSTD_Sequence* seqStart;
+ size_t seqIndex;
+ size_t maxSequences;
+} SeqCollector;
+
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ int forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
+ size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size */
+ int srcSizeHint; /* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size */
+
+ ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_paramSwitch_e literalCompressionMode;
+
+ /* Multithreading: used to pass parameters to mtctx */
+ int nbWorkers;
+ size_t jobSize;
+ int overlapLog;
+ int rsyncable;
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
+ /* Dedicated dict search algorithm trigger */
+ int enableDedicatedDictSearch;
+
+ /* Input/output buffer modes */
+ ZSTD_bufferMode_e inBufferMode;
+ ZSTD_bufferMode_e outBufferMode;
+
+ /* Sequence compression API */
+ ZSTD_sequenceFormat_e blockDelimiters;
+ int validateSequences;
+
+ /* Block splitting */
+ ZSTD_paramSwitch_e useBlockSplitter;
+
+ /* Param for deciding whether to use row-based matchfinder */
+ ZSTD_paramSwitch_e useRowMatchFinder;
+
+ /* Always load a dictionary in ext-dict mode (not prefix mode)? */
+ int deterministicRefPrefix;
+
+ /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
+#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
+#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+
+/*
+ * Indicates whether this compression proceeds directly from user-provided
+ * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
+ * whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+ ZSTDb_not_buffered,
+ ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
+/*
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+ seqStore_t fullSeqStoreChunk;
+ seqStore_t firstHalfSeqStore;
+ seqStore_t secondHalfSeqStore;
+ seqStore_t currSeqStore;
+ seqStore_t nextSeqStore;
+
+ U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
+struct ZSTD_CCtx_s {
+ ZSTD_compressionStage_e stage;
+ int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
+ ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
+ U32 dictID;
+ size_t dictContentSize;
+
+ ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
+ size_t blockSize;
+ unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ unsigned long long consumedSrcSize;
+ unsigned long long producedCSize;
+ struct xxh64_state xxhState;
+ ZSTD_customMem customMem;
+ ZSTD_threadPool* pool;
+ size_t staticSize;
+ SeqCollector seqCollector;
+ int isFirstBlock;
+ int initialized;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ ldmState_t ldmState; /* long distance matching state */
+ rawSeq* ldmSequences; /* Storage for the ldm output sequences */
+ size_t maxNbLdmSequences;
+ rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ ZSTD_blockState_t blockState;
+ U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+
+ /* Whether we are streaming or not */
+ ZSTD_buffered_policy_e bufferedPolicy;
+
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
+ /* Stable in/out buffer verification */
+ ZSTD_inBuffer expectedInBuffer;
+ size_t expectedOutBufferSize;
+
+ /* Dictionary */
+ ZSTD_localDict localDict;
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+
+ /* Tracing */
+
+ /* Workspace for block splitter */
+ ZSTD_blockSplitCtx blockSplitCtx;
+};
+
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum {
+ ZSTD_noDict = 0,
+ ZSTD_extDict = 1,
+ ZSTD_dictMatchState = 2,
+ ZSTD_dedicatedDictSearch = 3
+} ZSTD_dictMode_e;
+
+typedef enum {
+ ZSTD_cpm_noAttachDict = 0, /* Compression with ZSTD_noDict or ZSTD_extDict.
+ * In this mode we use both the srcSize and the dictSize
+ * when selecting and adjusting parameters.
+ */
+ ZSTD_cpm_attachDict = 1, /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
+ * In this mode we only take the srcSize into account when selecting
+ * and adjusting parameters.
+ */
+ ZSTD_cpm_createCDict = 2, /* Creating a CDict.
+ * In this mode we take both the source size and the dictionary size
+ * into account when selecting and adjusting the parameters.
+ */
+ ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ * We don't know what these parameters are for. We default to the legacy
+ * behavior of taking both the source size and the dict size into account
+ * when selecting and adjusting parameters.
+ */
+} ZSTD_cParamMode_e;
+
+typedef size_t (*ZSTD_blockCompressor) (
+ ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+ static const U32 LL_deltaCode = 19;
+ return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ * because that is the format in which it is stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+ static const U32 ML_deltaCode = 36;
+ return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+/* ZSTD_noCompressBlock() :
+ * Writes uncompressed block to dst buffer from given src.
+ * Returns the size of the block */
+MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+{
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+ dstSize_tooSmall, "dst buf too small for uncompressed block");
+ MEM_writeLE24(dst, cBlockHeader24);
+ ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ return ZSTD_blockHeaderSize + srcSize;
+}
+
+MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+{
+ BYTE* const op = (BYTE*)dst;
+ U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
+ MEM_writeLE24(op, cBlockHeader);
+ op[3] = src;
+ return 4;
+}
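+
+/* Worked example (editor's note, not part of upstream zstd): for an RLE block of
+ * srcSize == 100 that is also the last block, cBlockHeader == 1 + (bt_rle << 1) + (100 << 3)
+ * == 803 == 0x323, written little-endian as the 3 bytes 0x23 0x03 0x00, followed by the
+ * single repeated byte, for a total of 4 bytes on the wire.
+ */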
+
+
+/* ZSTD_minGain() :
+ * minimum compression gain required
+ * to generate a compressed block or a compressed literals section.
+ * note : the same formula is used for both situations */
+MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+ U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+ ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ return (srcSize >> minlog) + 2;
+}
+
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
+{
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_ps_enable:
+ return 0;
+ case ZSTD_ps_disable:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_ps_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+}
+
+/*! ZSTD_safecopyLiterals() :
+ * memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
+ * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
+ * large copies.
+ */
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
+ assert(iend > ilimit_w);
+ if (ip <= ilimit_w) {
+ ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
+ op += ilimit_w - ip;
+ ip = ilimit_w;
+ }
+ while (ip < iend) *op++ = *ip++;
+}
+
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
+#define STORE_REPCODE_1 STORE_REPCODE(1)
+#define STORE_REPCODE_2 STORE_REPCODE(2)
+#define STORE_REPCODE_3 STORE_REPCODE(3)
+#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
+#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE)
+#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE)
+#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
+#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
+#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */
+#define STORED_TO_OFFBASE(o) ((o)+1)
+#define OFFBASE_TO_STORED(o) ((o)-1)
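+
+/* Worked example (editor's note, not part of upstream zstd): with ZSTD_REP_NUM == 3,
+ * ZSTD_REP_MOVE == 2, so STORE_REPCODE_1..3 encode as 0..2 and STORE_OFFSET(o) encodes a
+ * full offset o as o + 2 (always > 2). Hence STORED_IS_REPCODE(1) is true, while
+ * STORE_OFFSET(13) == 15, STORED_OFFSET(15) == 13, and STORED_TO_OFFBASE(15) == 16 is the
+ * value that ends up in the sequence's offBase field.
+ */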
+
+/*! ZSTD_storeSeq() :
+ * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
+ * @offBase_minus1 : Users should employ the macros STORE_REPCODE_X and STORE_OFFSET().
+ * @matchLength : must be >= MINMATCH
+ * Allowed to overread literals up to litLimit.
+*/
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(seqStore_t* seqStorePtr,
+ size_t litLength, const BYTE* literals, const BYTE* litLimit,
+ U32 offBase_minus1,
+ size_t matchLength)
+{
+ BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
+ BYTE const* const litEnd = literals + litLength;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
+ static const BYTE* g_start = NULL;
+ if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
+ { U32 const pos = (U32)((const BYTE*)literals - g_start);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
+ pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
+ }
+#endif
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+ /* copy Literals */
+ assert(seqStorePtr->maxNbLit <= 128 KB);
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
+ assert(literals + litLength <= litLimit);
+ if (litEnd <= litLimit_w) {
+ /* Common case we can use wildcopy.
+ * First copy 16 bytes, because literals are likely short.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(seqStorePtr->lit, literals);
+ if (litLength > 16) {
+ ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
+ }
+ } else {
+ ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
+ }
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
+
+ /* match Length */
+ assert(matchLength >= MINMATCH);
+ { size_t const mlBase = matchLength - MINMATCH;
+ if (mlBase>0xFFFF) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].mlBase = (U16)mlBase;
+ }
+
+ seqStorePtr->sequences++;
+}
+
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+ if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = STORED_OFFSET(offBase_minus1);
+ } else { /* repcode */
+ U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
+ if (repCode > 0) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+ rep[1] = rep[0];
+ rep[0] = currentOffset;
+ } else { /* repCode == 0 */
+ /* nothing to do */
+ }
+ }
+}
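+
+/* Worked example (editor's note, not part of upstream zstd): starting from rep == {8, 4, 2}:
+ *   - a full offset STORE_OFFSET(13) with ll0 == 0 gives rep == {13, 8, 4};
+ *   - a repcode STORE_REPCODE_2 with ll0 == 0 gives rep == {4, 8, 2} (rep[1] promoted);
+ *   - STORE_REPCODE_1 with ll0 == 0 leaves rep unchanged (repCode == 0).
+ */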
+
+typedef struct repcodes_s {
+ U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+ repcodes_t newReps;
+ ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+ ZSTD_updateRep(newReps.rep, offBase_minus1, ll0);
+ return newReps;
+}
+
+
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* calculated this way because the compiler complains in 32-bit mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ if (pIn < pInLoopLimit) {
+ { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (diff) return ZSTD_NbCommonBytes(diff); }
+ pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ } }
+ if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
+
+/* ZSTD_count_2segments() :
+ * can count match length with `ip` & `match` in 2 different segments.
+ * convention : on reaching mEnd, match count continues starting from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+ const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+ DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+ DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
+
+
+/*-*************************************
+ * Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
+
+/* ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+ U64 power = 1;
+ while (exponent) {
+ if (exponent & 1) power *= base;
+ exponent >>= 1;
+ base *= base;
+ }
+ return power;
+}
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/* ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+ BYTE const* istart = (BYTE const*)buf;
+ size_t pos;
+ for (pos = 0; pos < size; ++pos) {
+ hash *= prime8bytes;
+ hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ }
+ return hash;
+}
+
+/* ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+ return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/* ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of `length` bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+ return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/* ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+ hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+ hash *= prime8bytes;
+ hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ return hash;
+}
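+/* Illustrative example (not part of upstream zstd; buf, bufSize and windowSize
+ * are hypothetical names) : sliding the rolling hash across a buffer one byte
+ * at a time, so that after each step `hash` covers the last windowSize bytes :
+ *
+ *   U64 const pp = ZSTD_rollingHash_primePower(windowSize);
+ *   U64 hash = ZSTD_rollingHash_compute(buf, windowSize);
+ *   { size_t i;
+ *     for (i = windowSize; i < bufSize; i++)
+ *         hash = ZSTD_rollingHash_rotate(hash, buf[i - windowSize], buf[i], pp); }
+ *
+ * Each rotate removes the oldest byte's contribution (scaled by
+ * prime8bytes^(windowSize-1)) and appends the newest one, matching what
+ * ZSTD_rollingHash_compute(buf + i - windowSize + 1, windowSize) would return. */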
+
+/*-*************************************
+* Round buffer management
+***************************************/
+#if (ZSTD_WINDOWLOG_MAX_64 > 31)
+# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
+#endif
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX \
+ ( ((U32)-1) /* Maximum ending current index */ \
+ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
+
+/*
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+ size_t const endT = (size_t)(window->nextSrc - window->base);
+ U32 const end = (U32)endT;
+
+ window->lowLimit = end;
+ window->dictLimit = end;
+}
+
+MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
+{
+ return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+ window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+ (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
+}
+
+/*
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+ return window.lowLimit < window.dictLimit;
+}
+
+/*
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+ return ZSTD_window_hasExtDict(ms->window) ?
+ ZSTD_extDict :
+ ms->dictMatchState != NULL ?
+ (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
+ ZSTD_noDict;
+}
+
+/* Defining this macro to non-zero tells zstd to run the overflow correction
+ * code much more frequently. This is very inefficient, and should only be
+ * used for tests and fuzzers.
+ */
+#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
+# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
+# else
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
+# endif
+#endif
+
+/*
+ * ZSTD_window_canOverflowCorrect():
+ * Returns non-zero if the indices are large enough for overflow correction
+ * to work correctly without impacting compression ratio.
+ */
+MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src)
+{
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const curr = (U32)((BYTE const*)src - window.base);
+ U32 const minIndexToOverflowCorrect = cycleSize
+ + MAX(maxDist, cycleSize)
+ + ZSTD_WINDOW_START_INDEX;
+
+ /* Adjust the min index to backoff the overflow correction frequency,
+ * so we don't waste too much CPU in overflow correction. If this
+ * computation overflows we don't really care, we just need to make
+ * sure it is at least minIndexToOverflowCorrect.
+ */
+ U32 const adjustment = window.nbOverflowCorrections + 1;
+ U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
+ minIndexToOverflowCorrect);
+ U32 const indexLargeEnough = curr > adjustedIndex;
+
+ /* Only overflow correct early if the dictionary is invalidated already,
+ * so we don't hurt compression ratio.
+ */
+ U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
+
+ return indexLargeEnough && dictionaryInvalidated;
+}
+
+/*
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src,
+ void const* srcEnd)
+{
+ U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+ if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
+ return 1;
+ }
+ }
+ return curr > ZSTD_CURRENT_MAX;
+}
+
+/*
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+{
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29) - (1<<chainLog)
+ * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29
+ *
+ * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+ * After correction, current is less than (1<<chainLog + 1<<windowLog).
+ * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+ * In 32-bit mode we are safe, because (chainLog <= 29), so
+ * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+ * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const cycleMask = cycleSize - 1;
+ U32 const curr = (U32)((BYTE const*)src - window->base);
+ U32 const currentCycle = curr & cycleMask;
+ /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+ U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+ ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+ : 0;
+ U32 const newCurrent = currentCycle
+ + currentCycleCorrection
+ + MAX(maxDist, cycleSize);
+ U32 const correction = curr - newCurrent;
+ /* maxDist must be a power of two so that:
+ * (newCurrent & cycleMask) == (curr & cycleMask)
+ * This is required to not corrupt the chains / binary tree.
+ */
+ assert((maxDist & (maxDist - 1)) == 0);
+ assert((curr & cycleMask) == (newCurrent & cycleMask));
+ assert(curr > newCurrent);
+ if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+ }
+
+ window->base += correction;
+ window->dictBase += correction;
+ if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->lowLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->lowLimit -= correction;
+ }
+ if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->dictLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->dictLimit -= correction;
+ }
+
+ /* Ensure we can still reference the full window. */
+ assert(newCurrent >= maxDist);
+ assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
+ /* Ensure that lowLimit and dictLimit didn't underflow. */
+ assert(window->lowLimit <= newCurrent);
+ assert(window->dictLimit <= newCurrent);
+
+ ++window->nbOverflowCorrections;
+
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+ window->lowLimit);
+ return correction;
+}
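+/* Worked example (informal; assumes ZSTD_WINDOW_START_INDEX is a small
+ * constant, 2 in upstream zstd) : with cycleLog = 20, maxDist = 1<<20 and
+ * curr = 0x90000123, we get currentCycle = 0x123, newCurrent =
+ * 0x123 + (1<<20) = 0x100123 and correction = 0x8FF00000. The low 20 bits of
+ * every index are preserved, so hash chains and binary trees remain valid
+ * after the correction is subtracted from all stored indices. */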
+
+/*
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
+ * It ensures index is valid as long as index >= lowLimit.
+ * This must be called before a block compression call.
+ *
+ * loadedDictEnd is only defined if a dictionary is in use for the current compression.
+ * As the name implies, loadedDictEnd represents the index at the end of the dictionary.
+ * The value lies within the context's referential, so it can be directly compared to blockEndIdx.
+ *
+ * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
+ * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
+ * This is because dictionaries are allowed to be referenced fully
+ * as long as the last byte of the dictionary is in the window.
+ * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
+ *
+ * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
+ * In dictMatchState mode, lowLimit and dictLimit are the same,
+ * and the dictionary is below them.
+ * forceWindow and dictMatchState are therefore incompatible.
+ */
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+ DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+ /* - When there is no dictionary : loadedDictEnd == 0.
+ In which case, the test (blockEndIdx > maxDist) is merely to avoid
+ overflowing the next operation `newLowLimit = blockEndIdx - maxDist`.
+ - When there is a standard dictionary :
+ Index referential is copied from the dictionary,
+ which means it starts from 0.
+ In which case, loadedDictEnd == dictSize,
+ and it makes sense to compare `blockEndIdx > maxDist + dictSize`
+ since `blockEndIdx` also starts from zero.
+ - When there is an attached dictionary :
+ loadedDictEnd is expressed within the referential of the context,
+ so it can be directly compared against blockEndIdx.
+ */
+ if (blockEndIdx > maxDist + loadedDictEnd) {
+ U32 const newLowLimit = blockEndIdx - maxDist;
+ if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+ if (window->dictLimit < window->lowLimit) {
+ DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
+ window->dictLimit = window->lowLimit;
+ }
+ /* On reaching window size, dictionaries are invalidated */
+ if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+ if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
+ }
+}
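+/* Worked example (informal) : with no dictionary (loadedDictEnd == 0),
+ * maxDist = 1<<20 and blockEndIdx = 0x500000, the condition triggers and
+ * lowLimit (and, if needed, dictLimit) is raised to 0x500000 - 0x100000 =
+ * 0x400000, so matches older than one window are unreachable for the next block. */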
+
+/* Similar to ZSTD_window_enforceMaxDist(),
+ * but only invalidates dictionary
+ * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
+ * loadedDictEnd uses same referential as window->base
+ * maxDist is the window size */
+MEM_STATIC void
+ZSTD_checkDictValidity(const ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ assert(loadedDictEndPtr != NULL);
+ assert(dictMatchStatePtr != NULL);
+ { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = *loadedDictEndPtr;
+ DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+ assert(blockEndIdx >= loadedDictEnd);
+
+ if (blockEndIdx > loadedDictEnd + maxDist) {
+ /* On reaching window size, dictionaries are invalidated.
+ * For simplification, if window size is reached anywhere within next block,
+ * the dictionary is invalidated for the full block.
+ */
+ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
+ *loadedDictEndPtr = 0;
+ *dictMatchStatePtr = NULL;
+ } else {
+ if (*loadedDictEndPtr != 0) {
+ DEBUGLOG(6, "dictionary considered valid for current block");
+ } } }
+}
+
+MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
+ ZSTD_memset(window, 0, sizeof(*window));
+ window->base = (BYTE const*)" ";
+ window->dictBase = (BYTE const*)" ";
+ ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+ window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */
+ window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */
+ window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */
+ window->nbOverflowCorrections = 0;
+}
+
+/*
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+ void const* src, size_t srcSize,
+ int forceNonContiguous)
+{
+ BYTE const* const ip = (BYTE const*)src;
+ U32 contiguous = 1;
+ DEBUGLOG(5, "ZSTD_window_update");
+ if (srcSize == 0)
+ return contiguous;
+ assert(window->base != NULL);
+ assert(window->dictBase != NULL);
+ /* Check if blocks follow each other */
+ if (src != window->nextSrc || forceNonContiguous) {
+ /* not contiguous */
+ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+ DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
+ window->lowLimit = window->dictLimit;
+ assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
+ window->dictLimit = (U32)distanceFromBase;
+ window->dictBase = window->base;
+ window->base = ip - distanceFromBase;
+ /* ms->nextToUpdate = window->dictLimit; */
+ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
+ contiguous = 0;
+ }
+ window->nextSrc = ip + srcSize;
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ( (ip+srcSize > window->dictBase + window->lowLimit)
+ & (ip < window->dictBase + window->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ window->lowLimit = lowLimitMax;
+ DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
+ }
+ return contiguous;
+}
+
+/*
+ * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.lowLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
+ * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
+ * valid for the entire block. So this check is sufficient to find the lowest valid match index.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+/*
+ * Returns the lowest allowed match index in the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.dictLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When computing the lowest prefix index we need to take the dictionary into account to handle
+ * the edge case where the dictionary and the source are contiguous in memory.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+ U32 const fp_accuracy = 8;
+ U32 const fp_multiplier = (1 << fp_accuracy);
+ U32 const newStat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(newStat);
+ U32 const BWeight = hb * fp_multiplier;
+ U32 const FWeight = (newStat << fp_accuracy) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + fp_accuracy < 31);
+ return (double)weight / fp_multiplier;
+}
+
+/* display a table content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+ unsigned u, sum;
+ for (u=0, sum=0; u<=max; u++) sum += table[u];
+ DEBUGLOG(2, "total nb elts: %u", sum);
+ for (u=0; u<=max; u++) {
+ DEBUGLOG(2, "%2u: %5u (%.2f)",
+ u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+ }
+}
+
+#endif
+
+
+
+/* ===============================================================
+ * Shared internal declarations
+ * These prototypes may be called from sources not in lib/compress
+ * =============================================================== */
+
+/* ZSTD_loadCEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * return : size of dictionary header (size of magic number + dict ID + entropy tables)
+ * assumptions : magic number is assumed to be already checked
+ * and dictSize >= 8 */
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize);
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
+ * cParams are built depending on compressionLevel, src size hints,
+ * LDM and manually set compression parameters.
+ * Note: srcSizeHint == 0 means 0!
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+/*! ZSTD_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
+/*! ZSTD_getCParamsFromCDict() :
+ * as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ const ZSTD_CCtx_params* params);
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * seqs must parse a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
+
+/* ZSTD_CCtx_trace() :
+ * Trace the end of a compression call.
+ */
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
new file mode 100644
index 000000000000..52b0a8059aba
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_literals.h"
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ZSTD_memcpy(ostart + flSize, src, srcSize);
+ DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+ return srcSize + flSize;
+}
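+/* Worked example (informal; upstream defines set_basic as 0) : for
+ * srcSize = 200, flSize = 2, so the "2 - 2 - 12" layout applies and the
+ * header written by MEM_writeLE16() is 0 + (1<<2) + (200<<4) = 0x0C84 :
+ * 2 bits of block type, 2 bits of size format, 12 bits of regenerated size. */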
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ostart[flSize] = *(const BYTE*)src;
+ DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+ return flSize+1;
+}
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2,
+ unsigned suspectUncompressible)
+{
+ size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+ BYTE* const ostart = (BYTE*)dst;
+ U32 singleStream = srcSize < 256;
+ symbolEncodingType_e hType = set_compressed;
+ size_t cLitSize;
+
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
+ disableLiteralCompression, (U32)srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralCompression)
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+ /* small ? don't even attempt compression (speed opt) */
+# define COMPRESS_LITERALS_SIZE_MIN 63
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+ { HUF_repeat repeat = prevHuf->repeatMode;
+ int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ?
+ HUF_compress1X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
+ HUF_compress4X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
+ if (repeat != HUF_repeat_none) {
+ /* reused the existing table */
+ DEBUGLOG(5, "Reusing previous huffman table");
+ hType = set_repeat;
+ }
+ }
+
+ if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+ if (cLitSize==1) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ }
+
+ if (hType == set_compressed) {
+ /* using a newly constructed table */
+ nextHuf->repeatMode = HUF_repeat_check;
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
+ return lhSize+cLitSize;
+}
diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
new file mode 100644
index 000000000000..9775fb97cb70
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_LITERALS_H
+#define ZSTD_COMPRESS_LITERALS_H
+
+#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
+
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2,
+ unsigned suspectUncompressible);
+
+#endif /* ZSTD_COMPRESS_LITERALS_H */
diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
new file mode 100644
index 000000000000..21ddc1b37acf
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_sequences.h"
+
+/*
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+ 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+ 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
+ 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
+ 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
+ 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
+ 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
+ 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
+ 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
+ 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
+ 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
+ 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
+ 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
+ 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
+ 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
+ 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
+ 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
+ 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
+ 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
+ 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
+ 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
+ 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
+ 5, 4, 2, 1,
+};
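+/* Worked example (informal) : kInverseProbabilityLog256[128] == 256, i.e. a
+ * symbol with normalized probability 128/256 costs 256/256 = 1 bit, and
+ * kInverseProbabilityLog256[64] == 512, i.e. probability 1/4 costs 2 bits,
+ * both in the 8-bit fixed point used by the cost estimators below. */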
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+ void const* ptr = ctable;
+ U16 const* u16ptr = (U16 const*)ptr;
+ U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+ return maxSymbolValue;
+}
+
+/*
+ * Returns true if we should use ncount=-1; otherwise we
+ * use ncount=1 for low probability symbols instead.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+ /* Heuristic: This should cover most blocks <= 16K and
+ * start to fade out after 16K to about 32K depending on
+ * compressibility.
+ */
+ return nbSeq >= 2048;
+}
+
+/*
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+ size_t const nbSeq, unsigned const FSELog)
+{
+ BYTE wksp[FSE_NCOUNTBOUND];
+ S16 norm[MaxSeq + 1];
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
+ return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+ unsigned cost = 0;
+ unsigned s;
+
+ assert(total > 0);
+ for (s = 0; s <= max; ++s) {
+ unsigned norm = (unsigned)((256 * count[s]) / total);
+ if (count[s] != 0 && norm == 0)
+ norm = 1;
+ assert(count[s] < total);
+ cost += count[s] * kInverseProbabilityLog256[norm];
+ }
+ return cost >> 8;
+}
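+/* Worked example (informal) : for two symbols appearing 8 times each
+ * (total = 16), each norm is (256*8)/16 = 128, so the accumulated cost is
+ * (8 + 8) * kInverseProbabilityLog256[128] = 16 * 256, and the final
+ * `cost >> 8` gives 16 bits, i.e. 1 bit per symbol for a uniform pair. */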
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max)
+{
+ unsigned const kAccuracyLog = 8;
+ size_t cost = 0;
+ unsigned s;
+ FSE_CState_t cstate;
+ FSE_initCState(&cstate, ctable);
+ if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+ DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ ZSTD_getFSEMaxSymbolValue(ctable), max);
+ return ERROR(GENERIC);
+ }
+ for (s = 0; s <= max; ++s) {
+ unsigned const tableLog = cstate.stateLog;
+ unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+ unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+ if (count[s] == 0)
+ continue;
+ if (bitCost >= badCost) {
+ DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+ return ERROR(GENERIC);
+ }
+ cost += (size_t)count[s] * bitCost;
+ }
+ return cost >> kAccuracyLog;
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max)
+{
+ unsigned const shift = 8 - accuracyLog;
+ size_t cost = 0;
+ unsigned s;
+ assert(accuracyLog <= 8);
+ for (s = 0; s <= max; ++s) {
+ unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
+ unsigned const norm256 = normAcc << shift;
+ assert(norm256 > 0);
+ assert(norm256 < 256);
+ cost += count[s] * kInverseProbabilityLog256[norm256];
+ }
+ return cost >> 8;
+}
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy)
+{
+ ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+ if (mostFrequent == nbSeq) {
+ *repeatMode = FSE_repeat_none;
+ if (isDefaultAllowed && nbSeq <= 2) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ DEBUGLOG(5, "Selected set_basic");
+ return set_basic;
+ }
+ DEBUGLOG(5, "Selected set_rle");
+ return set_rle;
+ }
+ if (strategy < ZSTD_lazy) {
+ if (isDefaultAllowed) {
+ size_t const staticFse_nbSeq_max = 1000;
+ size_t const mult = 10 - strategy;
+ size_t const baseLog = 3;
+ size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
+ assert(mult <= 9 && mult >= 7);
+ if ( (*repeatMode == FSE_repeat_valid)
+ && (nbSeq < staticFse_nbSeq_max) ) {
+ DEBUGLOG(5, "Selected set_repeat");
+ return set_repeat;
+ }
+ if ( (nbSeq < dynamicFse_nbSeq_min)
+ || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+ DEBUGLOG(5, "Selected set_basic");
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ }
+ } else {
+ size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+ size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+ size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+ size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+ if (isDefaultAllowed) {
+ assert(!ZSTD_isError(basicCost));
+ assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+ }
+ assert(!ZSTD_isError(NCountCost));
+ assert(compressedCost < ERROR(maxCode));
+ DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+ if (basicCost <= repeatCost && basicCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_basic");
+ assert(isDefaultAllowed);
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ if (repeatCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_repeat");
+ assert(!ZSTD_isError(repeatCost));
+ return set_repeat;
+ }
+ assert(compressedCost < basicCost && compressedCost < repeatCost);
+ }
+ DEBUGLOG(5, "Selected set_compressed");
+ *repeatMode = FSE_repeat_check;
+ return set_compressed;
+}
+
+typedef struct {
+ S16 norm[MaxSeq + 1];
+ U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize)
+{
+ BYTE* op = (BYTE*)dst;
+ const BYTE* const oend = op + dstCapacity;
+ DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
+
+ switch (type) {
+ case set_rle:
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
+ *op = codeTable[0];
+ return 1;
+ case set_repeat:
+ ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
+ return 0;
+ case set_basic:
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
+ return 0;
+ case set_compressed: {
+ ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ if (count[codeTable[nbSeq-1]] > 1) {
+ count[codeTable[nbSeq-1]]--;
+ nbSeq_1--;
+ }
+ assert(nbSeq_1 > 1);
+ assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+ (void)entropyWorkspaceSize;
+ FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+ assert(oend >= op);
+ { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
+ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
+ return NCountSize;
+ }
+ }
+ default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+ dstSize_tooSmall, "not enough space remaining");
+ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+ (int)(blockStream.endPtr - blockStream.startPtr),
+ (unsigned)dstCapacity);
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ U32 const ofBits = ofCodeTable[nbSeq-1];
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
+ BIT_flushBits(&blockStream);
+ }
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
+ ofBits - extraBits);
+ } else {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
+ }
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode;
+ U32 const mlBits = ML_bits[mlCode];
+ DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].mlBase + MINMATCH,
+ (unsigned)sequences[n].offBase);
+ /* 32b*/ /* 64b*/
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
+ if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
+ BIT_flushBits(&blockStream); /* (7)*/
+ }
+ BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
+ ofBits - extraBits); /* 31 */
+ } else {
+ BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */
+ }
+ BIT_flushBits(&blockStream); /* (7)*/
+ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+ } }
+
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+ return streamSize;
+ }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static BMI2_TARGET_ATTRIBUTE size_t
+ZSTD_encodeSequences_bmi2(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+ DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+ }
+#endif
+ (void)bmi2;
+ return ZSTD_encodeSequences_default(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
new file mode 100644
index 000000000000..7991364c2f71
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_SEQUENCES_H
+#define ZSTD_COMPRESS_SEQUENCES_H
+
+#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
+#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+
+typedef enum {
+ ZSTD_defaultDisallowed = 0,
+ ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy);
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize);
+
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max);
+
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max);
+#endif /* ZSTD_COMPRESS_SEQUENCES_H */
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
new file mode 100644
index 000000000000..17d836cc84e8
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_superblock.h"
+
+#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
+#include "hist.h" /* HIST_countFast_wksp */
+#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+
+/* ZSTD_compressSubBlock_literal() :
+ * Compresses literals section for a sub-block.
+ * When we have to write the Huffman table we will sometimes choose a header
+ * size larger than necessary. This is because we have to pick the header size
+ * before we know the table size + compressed size, so we have a bound on the
+ * table size. If we guessed incorrectly, we fall back to uncompressed literals.
+ *
+ * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
+ * in writing the header, otherwise it is set to 0.
+ *
+ * hufMetadata->hType has literals block type info.
+ * If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
+ * If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
+ * If it is set_compressed, the first sub-block's literals section will be
+ * Compressed_Literals_Block, and the following sub-blocks' literals sections
+ * will be Treeless_Literals_Block.
+ * @return : compressed size of literals section of a sub-block
+ * Or 0 if it is unable to compress.
+ * Or error code */
+static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const BYTE* literals, size_t litSize,
+ void* dst, size_t dstSize,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ size_t const header = writeEntropy ? 200 : 0;
+ size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart + lhSize;
+ U32 const singleStream = lhSize == 3;
+ symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ size_t cLitSize = 0;
+
+ (void)bmi2; /* TODO bmi2... */
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
+
+ *entropyWritten = 0;
+ if (litSize == 0 || hufMetadata->hType == set_basic) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ } else if (hufMetadata->hType == set_rle) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
+ return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
+ }
+
+ assert(litSize > 0);
+ assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
+
+ if (writeEntropy && hufMetadata->hType == set_compressed) {
+ ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+ op += hufMetadata->hufDesSize;
+ cLitSize += hufMetadata->hufDesSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
+ }
+
+ /* TODO bmi2 */
+ { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
+ : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ op += cSize;
+ cLitSize += cSize;
+ if (cSize == 0 || ERR_isError(cSize)) {
+ DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
+ return 0;
+ }
+ /* If we expand and we aren't writing a header then emit uncompressed */
+ if (!writeEntropy && cLitSize >= litSize) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ /* If we are writing headers then allow expansion that doesn't change our header size. */
+ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
+ assert(cLitSize > litSize);
+ DEBUGLOG(5, "Literals expanded beyond allowed header size");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ *entropyWritten = 1;
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+ return op-ostart;
+}
+
+static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+ const seqDef* const sstart = sequences;
+ const seqDef* const send = sequences + nbSeq;
+ const seqDef* sp = sstart;
+ size_t matchLengthSum = 0;
+ size_t litLengthSum = 0;
+ (void)(litLengthSum); /* suppress unused variable warning on some environments */
+ while (send-sp > 0) {
+ ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+ litLengthSum += seqLen.litLength;
+ matchLengthSum += seqLen.matchLength;
+ sp++;
+ }
+ assert(litLengthSum <= litSize);
+ if (!lastSequence) {
+ assert(litLengthSum == litSize);
+ }
+ return matchLengthSum + litSize;
+}
+
+/* ZSTD_compressSubBlock_sequences() :
+ * Compresses sequences section for a sub-block.
+ * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
+ * symbol compression modes for the super-block.
+ * The first successfully compressed block will have these in its header.
+ * We set entropyWritten=1 when we succeed in compressing the sequences.
+ * The following sub-blocks will always have repeat mode.
+ * @return : compressed size of sequences section of a sub-block
+ * Or 0 if it is unable to compress
+ * Or error code. */
+static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ BYTE* seqHead;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
+
+ *entropyWritten = 0;
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "");
+ if (nbSeq < 0x7F)
+ *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ)
+ op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else
+ op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) {
+ return op - ostart;
+ }
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
+
+ if (writeEntropy) {
+ const U32 LLtype = fseMetadata->llType;
+ const U32 Offtype = fseMetadata->ofType;
+ const U32 MLtype = fseMetadata->mlType;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+ op += fseMetadata->fseTablesSize;
+ } else {
+ const U32 repeat = set_repeat;
+ *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, oend - op,
+ fseTables->matchlengthCTable, mlCode,
+ fseTables->offcodeCTable, ofCode,
+ fseTables->litlengthCTable, llCode,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(fseMetadata->lastCountSize + bitstreamSize == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+#endif
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
+ }
+
+ /* zstd versions <= 1.4.0 mistakenly report error when
+ * sequences section body size is less than 3 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1664.
+ * This can happen when the previous sequences section block is compressed
+ * with rle mode and the current block's sequences section is compressed
+ * with repeat mode where sequences section body size can be 1 byte.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (op-seqHead < 4) {
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
+ "an uncompressed block when sequences are < 4 bytes");
+ return 0;
+ }
+#endif
+
+ *entropyWritten = 1;
+ return op - ostart;
+}
+
+/* ZSTD_compressSubBlock() :
+ * Compresses a single sub-block.
+ * @return : compressed size of the sub-block
+ * Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* literals, size_t litSize,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2,
+ int writeLitEntropy, int writeSeqEntropy,
+ int* litEntropyWritten, int* seqEntropyWritten,
+ U32 lastBlock)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart + ZSTD_blockHeaderSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
+ litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
+ { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
+ &entropyMetadata->hufMetadata, literals, litSize,
+ op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+ FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
+ if (cLitSize == 0) return 0;
+ op += cLitSize;
+ }
+ { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
+ &entropyMetadata->fseMetadata,
+ sequences, nbSeq,
+ llCode, mlCode, ofCode,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeSeqEntropy, seqEntropyWritten);
+ FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
+ if (cSeqSize == 0) return 0;
+ op += cSeqSize;
+ }
+ /* Write block header */
+ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(ostart, cBlockHeader24);
+ }
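+ /* Illustrative note (added for exposition, not part of upstream zstd):
+ * the 3-byte block header packs lastBlock in bit 0, the block type
+ * (bt_compressed == 2) in bits 1-2, and cSize in bits 3-23.
+ * E.g. cSize = 1000, lastBlock = 0 -> 0 + (2<<1) + (1000<<3) = 0x1F44,
+ * stored little-endian by MEM_writeLE24(). */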
+ return op-ostart;
+}
+
+static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = 255;
+ size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
+static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, unsigned maxCode,
+ size_t nbSeq, const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ cSymbolTypeSizeEstimateInBits = max <= defaultMax
+ ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+ : ERROR(GENERIC);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits / 8;
+}
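+
+/* Illustrative note (added for exposition, not part of upstream zstd):
+ * in the estimator above, additionalBits is NULL for the offset codes because
+ * an offset code of N consumes exactly N extra bits from the bitstream
+ * (e.g. ofCode 5 adds 5 bits), so *ctp can be added directly. Literal-length
+ * and match-length codes instead look up their extra bits in the LL_bits /
+ * ML_bits tables, which the callers below pass in as additionalBits. */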
+
+static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+ size_t cSeqSizeEstimate = 0;
+ if (nbSeq == 0) return sequencesSectionHeaderSize;
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
+ nbSeq, fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
+ nbSeq, fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
+ nbSeq, fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t cSizeEstimate = 0;
+ cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return cSizeEstimate + ZSTD_blockHeaderSize;
+}
+
+static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+{
+ if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
+ return 1;
+ if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
+ return 1;
+ if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
+ return 1;
+ return 0;
+}
+
+/* ZSTD_compressSubBlock_multi() :
+ * Breaks super-block into multiple sub-blocks and compresses them.
+ * Entropy will be written to the first block.
+ * The following blocks will use repeat mode to compress.
+ * All sub-blocks are compressed blocks (no raw or rle blocks).
+ * @return : compressed size of the super block (which is multiple ZSTD blocks)
+ * Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ const ZSTD_compressedBlockState_t* prevCBlock,
+ ZSTD_compressedBlockState_t* nextCBlock,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const int bmi2, U32 lastBlock,
+ void* workspace, size_t wkspSize)
+{
+ const seqDef* const sstart = seqStorePtr->sequencesStart;
+ const seqDef* const send = seqStorePtr->sequences;
+ const seqDef* sp = sstart;
+ const BYTE* const lstart = seqStorePtr->litStart;
+ const BYTE* const lend = seqStorePtr->lit;
+ const BYTE* lp = lstart;
+ BYTE const* ip = (BYTE const*)src;
+ BYTE const* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ const BYTE* llCodePtr = seqStorePtr->llCode;
+ const BYTE* mlCodePtr = seqStorePtr->mlCode;
+ const BYTE* ofCodePtr = seqStorePtr->ofCode;
+ size_t targetCBlockSize = cctxParams->targetCBlockSize;
+ size_t litSize, seqCount;
+ int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+ int writeSeqEntropy = 1;
+ int lastSequence = 0;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
+ (unsigned)(lend-lp), (unsigned)(send-sstart));
+
+ litSize = 0;
+ seqCount = 0;
+ do {
+ size_t cBlockSizeEstimate = 0;
+ if (sstart == send) {
+ lastSequence = 1;
+ } else {
+ const seqDef* const sequence = sp + seqCount;
+ lastSequence = sequence == send - 1;
+ litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
+ seqCount++;
+ }
+ if (lastSequence) {
+ assert(lp <= lend);
+ assert(litSize <= (size_t)(lend - lp));
+ litSize = (size_t)(lend - lp);
+ }
+ /* I think there is an optimization opportunity here.
+ * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+ * since it recalculates estimate from scratch.
+ * For example, it would recount literal distribution and symbol codes every time.
+ */
+ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
+ &nextCBlock->entropy, entropyMetadata,
+ workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
+ if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
+ int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
+ const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ lastBlock && lastSequence);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Committed the sub-block");
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ sp += seqCount;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ litSize = 0;
+ seqCount = 0;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ }
+ }
+ } while (!lastSequence);
+ if (writeLitEntropy) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+ ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+ }
+ if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
+ /* If we haven't written our entropy tables, then we've violated our contract and
+ * must emit an uncompressed block.
+ */
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+ return 0;
+ }
+ if (ip < iend) {
+ size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ assert(cSize != 0);
+ op += cSize;
+ /* We have to regenerate the repcodes because we've skipped some sequences */
+ if (sp < send) {
+ seqDef const* seq;
+ repcodes_t rep;
+ ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
+ for (seq = sstart; seq < sp; ++seq) {
+ ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ }
+ ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
+ }
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
+ return op-ostart;
+}
+
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock) {
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ &entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+
+ return ZSTD_compressSubBlock_multi(&zc->seqStore,
+ zc->blockState.prevCBlock,
+ zc->blockState.nextCBlock,
+ &entropyMetadata,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ src, srcSize,
+ zc->bmi2, lastBlock,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+}
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
new file mode 100644
index 000000000000..224ece79546e
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_ADVANCED_H
+#define ZSTD_COMPRESS_ADVANCED_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+
+#include <linux/zstd.h> /* ZSTD_CCtx */
+
+/*-*************************************
+* Target Compressed Block Size
+***************************************/
+
+/* ZSTD_compressSuperBlock() :
+ * Used to compress a super block when targetCBlockSize is being used.
+ * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock);
+
+#endif /* ZSTD_COMPRESS_ADVANCED_H */
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
new file mode 100644
index 000000000000..349fc923c355
--- /dev/null
+++ b/lib/zstd/compress/zstd_cwksp.h
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CWKSP_H
+#define ZSTD_CWKSP_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+
+/* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+#endif
+
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
+/*-*************************************
+* Structures
+***************************************/
+typedef enum {
+ ZSTD_cwksp_alloc_objects,
+ ZSTD_cwksp_alloc_buffers,
+ ZSTD_cwksp_alloc_aligned
+} ZSTD_cwksp_alloc_phase_e;
+
+/*
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
+/*
+ * Zstd fits all its internal data structures into a single contiguous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ * - The static objects need to be cleared once and can then be trivially
+ * reused for each compression.
+ *
+ * - Various buffers don't need to be initialized at all--they are always
+ * written into before they're read.
+ *
+ * - The matchstate tables have a unique requirement that they don't need
+ * their memory to be totally cleared, but they do need the memory to have
+ * some bound, i.e., a guarantee that all values in the memory they've been
+ *   allocated are less than some maximum value (which is the starting value
+ * for the indices that they will then use for compression). When this
+ * guarantee is provided to them, they can use the memory without any setup
+ * work. When it can't, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ * compressions without having to perform any expensive reallocation or
+ * reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ * multiple compressions **even when the compression parameters change** and
+ * we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [ ... workspace ... ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ * so that literally everything fits in a single buffer. Note: if present,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
+ * CDict}() rely on a pointer comparison to see whether one or two frees are
+ * required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ * nonetheless "dynamically" allocated in the workspace so that we can
+ * control how they're initialized separately from the broader ZSTD_CCtx.
+ * Examples:
+ * - Entropy Workspace
+ * - 2 x ZSTD_compressedBlockState_t
+ * - CDict dictionary contents
+ *
+ * - Tables: these are any of several different datastructures (hash tables,
+ * chain tables, binary trees) that all respect a common format: they are
+ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ * Their sizes depend on the cparams. These tables are 64-byte aligned.
+ *
+ * - Aligned: these buffers are used for various purposes that require 4 byte
+ * alignment, but don't require any initialization before they're used. These
+ * buffers are each aligned to 64 bytes.
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ * any alignment or initialization before they're used. This means they can
+ * be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
+ * 3. Aligned/Tables
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+typedef struct {
+ void* workspace;
+ void* workspaceEnd;
+
+ void* objectEnd;
+ void* tableEnd;
+ void* tableValidEnd;
+ void* allocStart;
+
+ BYTE allocFailed;
+ int workspaceOversizedDuration;
+ ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
+} ZSTD_cwksp;
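+
+/*
+ * Illustrative sketch (added for exposition, not part of upstream zstd;
+ * identifiers such as mem, memSize, objSize, bufSize, tblSize are hypothetical
+ * and assumed to satisfy the alignment asserts below). The phase ordering
+ * described above means reservations must be made in
+ * object -> buffer -> aligned/table order:
+ *
+ *     ZSTD_cwksp ws;
+ *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
+ *     void* obj = ZSTD_cwksp_reserve_object(&ws, objSize);   (objects phase)
+ *     BYTE* buf = ZSTD_cwksp_reserve_buffer(&ws, bufSize);   (buffers phase)
+ *     void* tbl = ZSTD_cwksp_reserve_table(&ws, tblSize);    (aligned/tables phase)
+ *
+ * Requesting a buffer after a table violates the documented ordering and trips
+ * the phase assertion in ZSTD_cwksp_internal_advance_phase(), since phases
+ * only move forward.
+ */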
+
+/*-*************************************
+* Functions
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+ (void)ws;
+ assert(ws->workspace <= ws->objectEnd);
+ assert(ws->objectEnd <= ws->tableEnd);
+ assert(ws->objectEnd <= ws->tableValidEnd);
+ assert(ws->tableEnd <= ws->allocStart);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ assert(ws->allocStart <= ws->workspaceEnd);
+}
+
+/*
+ * Align must be a power of 2.
+ */
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+ size_t const mask = align - 1;
+ assert((align & mask) == 0);
+ return (size + mask) & ~mask;
+}
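+
+/* Illustrative note (added for exposition): e.g. ZSTD_cwksp_align(10, 8) == 16,
+ * ZSTD_cwksp_align(64, 64) == 64, and ZSTD_cwksp_align(65, 64) == 128. */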
+
+/*
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else is, though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
+ */
+MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+ if (size == 0)
+ return 0;
+ return size;
+}
+
+/*
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
+
+/*
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+ /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+ * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+ * to align the beginning of the aligned section.
+ *
+ * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+ * aligneds being sized in multiples of 64 bytes.
+ */
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+ return slackSpace;
+}
+
+
+/*
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+ size_t const alignBytesMask = alignBytes - 1;
+ size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+ assert((alignBytes & alignBytesMask) == 0);
+ assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return bytes;
+}
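+
+/* Illustrative note (added for exposition): with alignBytes == 64, a pointer
+ * whose low 6 bits are 40 (0x28) needs (64 - 40) & 63 == 24 additional bytes,
+ * while an already 64-byte-aligned pointer needs (64 - 0) & 63 == 0. */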
+
+/*
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+ void* const alloc = (BYTE*)ws->allocStart - bytes;
+ void* const bottom = ws->tableEnd;
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ /* the area is reserved from the end of wksp.
+ * If it overlaps with tableValidEnd, it voids guarantees on values' range */
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+ return alloc;
+}
+
+/*
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns 0 on success, or a zstd error code.
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
+ /* Going from allocating objects to allocating buffers */
+ if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+ phase >= ZSTD_cwksp_alloc_buffers) {
+ ws->tableValidEnd = ws->objectEnd;
+ }
+
+ /* Going from allocating buffers to allocating aligneds/tables */
+ if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+ phase >= ZSTD_cwksp_alloc_aligned) {
+ { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+ size_t const bytesToAlign =
+ ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+ ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+ RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+ memory_allocation, "aligned phase - alignment initial allocation failed!");
+ }
+ { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+ void* const alloc = ws->objectEnd;
+ size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+ DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+ RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+ "table phase - alignment initial allocation failed!");
+ ws->objectEnd = objectEnd;
+ ws->tableEnd = objectEnd; /* table area starts being empty */
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ } } }
+ ws->phase = phase;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+ return 0;
+}
+
+/*
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/*
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
+ void* alloc;
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+ return NULL;
+ }
+
+
+ alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
+
+
+ return alloc;
+}
+
+/*
+ * Reserves and returns unaligned memory.
+ */
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
+ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/*
+ * Reserves and returns memory whose size is rounded up to, and which is aligned on, ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ return ptr;
+}
+
+/*
+ * Aligned on 64 bytes. These buffers have the special property that
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ void* alloc;
+ void* end;
+ void* top;
+
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
+ alloc = ws->tableEnd;
+ end = (BYTE *)alloc + bytes;
+ top = ws->allocStart;
+
+ DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(end <= top);
+ if (end > top) {
+ DEBUGLOG(4, "cwksp: table alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->tableEnd = end;
+
+
+ assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ return alloc;
+}
+
+/*
+ * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+ size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ void* alloc = ws->objectEnd;
+ void* end = (BYTE*)alloc + roundedBytes;
+
+
+ DEBUGLOG(4,
+ "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+ alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+ assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+ assert(bytes % ZSTD_ALIGNOF(void*) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ /* we must be in the first phase, no advance is possible */
+ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+ DEBUGLOG(3, "cwksp: object alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
+
+
+ return alloc;
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ ws->tableValidEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ }
+ ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/*
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing tables!");
+
+
+ ws->tableEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing!");
+
+
+
+ ws->tableEnd = ws->objectEnd;
+ ws->allocStart = ws->workspaceEnd;
+ ws->allocFailed = 0;
+ if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+ ws->phase = ZSTD_cwksp_alloc_buffers;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
+ DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+ assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ ws->workspace = start;
+ ws->workspaceEnd = (BYTE*)start + size;
+ ws->objectEnd = ws->workspace;
+ ws->tableValidEnd = ws->objectEnd;
+ ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
+ ZSTD_cwksp_clear(ws);
+ ws->workspaceOversizedDuration = 0;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+ void* workspace = ZSTD_customMalloc(size, customMem);
+ DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+ RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
+ return 0;
+}
+
+MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+ void *ptr = ws->workspace;
+ DEBUGLOG(4, "cwksp: freeing workspace");
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
+}
+
+/*
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ *dst = *src;
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
+}
+
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+}
+
+/*-*************************************
+* Functions Checking Free Space
+***************************************/
+
+/* ZSTD_cwksp_estimated_space_within_bounds() :
+ * Returns whether the estimated space needed for a wksp is within an acceptable
+ * limit of the actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+ size_t const estimatedSpace, int resizedWorkspace) {
+ if (resizedWorkspace) {
+ /* Resized/newly allocated wksp should have exact bounds */
+ return ZSTD_cwksp_used(ws) == estimatedSpace;
+ } else {
+ /* Due to alignment, when reusing a workspace, we can actually consume up to 63 bytes fewer or more
+ * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+ */
+ return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+ }
+}
+
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_available(
+ ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+ && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+ ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+ ws->workspaceOversizedDuration++;
+ } else {
+ ws->workspaceOversizedDuration = 0;
+ }
+}
+
+
+#endif /* ZSTD_CWKSP_H */
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
new file mode 100644
index 000000000000..76933dea2624
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0)
+ hashSmall[smHash] = curr + i;
+ if (i == 0 || hashLarge[lgHash] == 0)
+ hashLarge[lgHash] = curr + i;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ } }
+}
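+
+/* Illustrative note (added for exposition, not part of upstream zstd): with
+ * fastHashFillStep == 3 the loop above visits every third position. In
+ * ZSTD_dtlm_fast mode only those positions (i == 0) are inserted, into both
+ * tables; in ZSTD_dtlm_full mode the two following positions of each triplet
+ * are additionally inserted into the large hash table, but only when the
+ * target slot is still empty. */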
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
+ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ size_t mLength;
+ U32 offset;
+ U32 curr;
+
+ /* how many positions to search before increasing step size */
+ const size_t kStepIncr = 1 << kSearchStrength;
+ /* the position at which to increment the step size if no match is found */
+ const BYTE* nextStep;
+ size_t step; /* the current step size */
+
+ size_t hl0; /* the long hash at ip */
+ size_t hl1; /* the long hash at ip1 */
+
+ U32 idxl0; /* the long match index for ip */
+ U32 idxl1; /* the long match index for ip1 */
+
+ const BYTE* matchl0; /* the long match for ip */
+ const BYTE* matchs0; /* the short match for ip */
+ const BYTE* matchl1; /* the long match for ip1 */
+
+ const BYTE* ip = istart; /* the current position */
+ const BYTE* ip1; /* the next position */
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
+
+ /* init */
+ ip += ((ip - prefixLowest) == 0);
+ {
+ U32 const current = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
+ U32 const maxRep = current - windowLow;
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Outer Loop: one iteration per match found and stored */
+ while (1) {
+ step = 1;
+ nextStep = ip + kStepIncr;
+ ip1 = ip + step;
+
+ if (ip1 > ilimit) {
+ goto _cleanup;
+ }
+
+ hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
+ idxl0 = hashLong[hl0];
+ matchl0 = base + idxl0;
+
+ /* Inner Loop: one iteration per search / position */
+ do {
+ const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 idxs0 = hashSmall[hs0];
+ curr = (U32)(ip-base);
+ matchs0 = base + idxs0;
+
+ hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */
+
+ /* check noDict repcode */
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ goto _match_stored;
+ }
+
+ hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
+
+ if (idxl0 > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchl0) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
+ offset = (U32)(ip-matchl0);
+ while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ idxl1 = hashLong[hl1];
+ matchl1 = base + idxl1;
+
+ if (idxs0 > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(matchs0) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ }
+
+ if (ip1 >= nextStep) {
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ step++;
+ nextStep += kStepIncr;
+ }
+ ip = ip1;
+ ip1 += step;
+
+ hl0 = hl1;
+ idxl0 = idxl1;
+ matchl0 = matchl1;
+ #if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+ #endif
+ } while (ip1 <= ilimit);
+
+_cleanup:
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_search_next_long:
+
+ /* check prefix long +1 match */
+ if (idxl1 > prefixLowestIndex) {
+ if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+ ip = ip1;
+ mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+ offset = (U32)(ip-matchl1);
+ while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ /* if no long +1 match, explore the short match we found */
+ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+ offset = (U32)(ip - matchs0);
+ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+
+ /* fall-through */
+
+_match_found: /* requires ip, offset, mLength */
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ if (step < 4) {
+ /* It is unsafe to write this value back to the hashtable when ip1 is
+ * greater than or equal to the new ip we will have after we're done
+ * processing this match. Rather than perform that test directly
+ * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
+ * more predictable test. Even the shortest match is at least minmatch
+ * (4 bytes), so as long as step, the initial distance between ip and ip1,
+ * is less than 4, we know ip1 < new ip. */
+ hashLong[hl1] = (U32)(ip1 - base);
+ }
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ }
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
+ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+ const U32* const dictHashLong = dms->hashTable;
+ const U32* const dictHashSmall = dms->chainTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
+ const U32 dictHBitsL = dictCParams->hashLog;
+ const U32 dictHBitsS = dictCParams->chainLog;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
+
+ /* if a dictionary is attached, it must be within window range */
+ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ U32 offset;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
+
+ /* check repcode */
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ goto _match_stored;
+ }
+
+ if (matchIndexL > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchLong) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else {
+ /* check dictMatchState long match */
+ U32 const dictMatchIndexL = dictHashLong[dictHL];
+ const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+ assert(dictMatchL < dictEnd);
+
+ if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+ mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+ offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+ goto _match_found;
+ } }
+
+ if (matchIndexS > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ } else {
+ /* check dictMatchState short match */
+ U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ match = dictBase + dictMatchIndexS;
+ matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+ if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ } }
+
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+#if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+#endif
+ continue;
+
+_search_next_long:
+
+ { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = curr + 1;
+
+ /* check prefix long +1 match */
+ if (matchIndexL3 > prefixLowestIndex) {
+ if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else {
+ /* check dict long +1 match */
+ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+ assert(dictMatchL3 < dictEnd);
+ if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+ ip++;
+ offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ } } }
+
+ /* if no long +1 match, explore the short match we found */
+ if (matchIndexS < prefixLowestIndex) {
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+ offset = (U32)(curr - matchIndexS);
+ while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip - match);
+ while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+
+_match_found:
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
+ dictBase + repIndex2 - dictIndexDelta :
+ base + repIndex2;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ } /* while (ip < ilimit) */
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
+ static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
+ }
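+
+/* Illustrative note (added for exposition, not part of upstream zstd): each
+ * instantiation below expands to a thin wrapper, e.g. ZSTD_GEN_DFAST_FN(noDict, 4)
+ * defines
+ *
+ *     static size_t ZSTD_compressBlock_doubleFast_noDict_4(
+ *         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ *         void const* src, size_t srcSize)
+ *     {
+ *         return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep,
+ *                                                             src, srcSize, 4);
+ *     }
+ *
+ * so the mls "template" parameter is resolved at compile time. */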
+
+ZSTD_GEN_DFAST_FN(noDict, 4)
+ZSTD_GEN_DFAST_FN(noDict, 5)
+ZSTD_GEN_DFAST_FN(noDict, 6)
+ZSTD_GEN_DFAST_FN(noDict, 7)
+
+ZSTD_GEN_DFAST_FN(dictMatchState, 4)
+ZSTD_GEN_DFAST_FN(dictMatchState, 5)
+ZSTD_GEN_DFAST_FN(dictMatchState, 6)
+ZSTD_GEN_DFAST_FN(dictMatchState, 7)
+
+
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
+ /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
+
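+ /* The wraparound test (U32)((prefixStartIndex-1) - repIndex) >= 3 fails only
+ * when repIndex lands within 3 positions below prefixStartIndex, i.e. when a
+ * 4-byte read at repMatch would straddle the non-contiguous dictBase/base
+ * boundary; the second term keeps repIndex >= dictStartIndex, within the
+ * valid window. */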
+ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ } else {
+ if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
+ offset = curr - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+ } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = curr + 1;
+ if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
+ ip++;
+ offset = curr+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ offset = curr - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+ } else {
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+ } }
+
+ /* move to next sequence start */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ & (offset_2 <= current2 - dictStartIndex))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+ZSTD_GEN_DFAST_FN(extDict, 4)
+ZSTD_GEN_DFAST_FN(extDict, 5)
+ZSTD_GEN_DFAST_FN(extDict, 6)
+ZSTD_GEN_DFAST_FN(extDict, 7)
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
+ }
+}
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
new file mode 100644
index 000000000000..6822bde65a1d
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
new file mode 100644
index 000000000000..a752e6beab52
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Every fastHashFillStep-th position is always inserted into the hash table.
+ * The positions in between are inserted only if their hash entry is still empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+ hashTable[hash0] = curr;
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hash] == 0) { /* not yet filled */
+ hashTable[hash] = curr + p;
+ } } } }
+}
+
+
+/*
+ * If you squint hard enough (and ignore repcodes), the search operation at any
+ * given position is broken into 4 stages:
+ *
+ * 1. Hash (map position to hash value via input read)
+ * 2. Lookup (map hash val to index via hashtable read)
+ * 3. Load (map index to value at that position via input read)
+ * 4. Compare
+ *
+ * Each of these steps involves a memory read at an address which is computed
+ * from the previous step. This means these steps must be sequenced and their
+ * latencies are cumulative.
+ *
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
+ * onto the next, this implementation interleaves these operations across the
+ * next few positions:
+ *
+ * R = Repcode Read & Compare
+ * H = Hash
+ * T = Table Lookup
+ * M = Match Read & Compare
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | ... M
+ * N+1 | ... TM
+ * N+2 | R H T M
+ * N+3 | H TM
+ * N+4 | R H T M
+ * N+5 | H ...
+ * N+6 | R ...
+ *
+ * This is very much analogous to the pipelining of execution in a CPU. And just
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+ * branch).
+ *
+ * When this happens, we throw away our current state, and do the following prep
+ * to re-enter the loop:
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | H T
+ * N+1 | H
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_fast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+
+ const BYTE* anchor = istart;
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* ip2;
+ const BYTE* ip3;
+ U32 current0;
+
+ U32 rep_offset1 = rep[0];
+ U32 rep_offset2 = rep[1];
+ U32 offsetSaved = 0;
+
+ size_t hash0; /* hash for ip0 */
+ size_t hash1; /* hash for ip1 */
+ U32 idx; /* match idx for ip0 */
+ U32 mval; /* src value at match idx */
+
+ U32 offcode;
+ const BYTE* match0;
+ size_t mLength;
+
+ /* ip0 and ip1 are always adjacent. The targetLength skipping and
+ * uncompressibility acceleration is applied to every other position,
+ * matching the behavior of #1562. step therefore represents the gap
+ * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+ size_t step;
+ const BYTE* nextStep;
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
+ ip0 += (ip0 == prefixStart);
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
+ if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
+ }
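+ /* Rep offsets that reach beyond the valid window cannot be used at the start
+ * of this block: they are cleared here and one of them is stashed in
+ * offsetSaved, which is restored at _cleanup if no new offset replaced it,
+ * so the repcode history still carries over to the next block. */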
+
+ /* start each op */
+_start: /* Requires: ip0 */
+
+ step = stepSize;
+ nextStep = ip0 + kStepIncr;
+
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+ ip1 = ip0 + 1;
+ ip2 = ip0 + step;
+ ip3 = ip2 + 1;
+
+ if (ip3 >= ilimit) {
+ goto _cleanup;
+ }
+
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+ idx = hashTable[hash0];
+
+ do {
+ /* load repcode match for ip[2]*/
+ const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* check repcode at ip[2] */
+ if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+ ip0 = ip2;
+ match0 = ip0 - rep_offset1;
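+ /* branchless backward extension: if the byte just before the repcode
+ * match also matches, step both pointers back by one before adding the
+ * 4 guaranteed bytes; the forward extension happens at _match. */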
+ mLength = ip0[-1] == match0[-1];
+ ip0 -= mLength;
+ match0 -= mLength;
+ offcode = STORE_REPCODE_1;
+ mLength += 4;
+ goto _match;
+ }
+
+ /* load match for ip[0] */
+ if (idx >= prefixStartIndex) {
+ mval = MEM_read32(base + idx);
+ } else {
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+ }
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip3;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* load match for ip[0] */
+ if (idx >= prefixStartIndex) {
+ mval = MEM_read32(base + idx);
+ } else {
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+ }
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip0 + step;
+ ip3 = ip1 + step;
+
+ /* calculate step */
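+ /* acceleration on incompressible data: roughly every kStepIncr bytes
+ * scanned without finding a match, the stride between probed position
+ * pairs grows by one, and the prefetches warm up the input about to be
+ * scanned. */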
+ if (ip2 >= nextStep) {
+ step++;
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ nextStep += kStepIncr;
+ }
+ } while (ip3 < ilimit);
+
+_cleanup:
+ /* Note that there are probably still a couple positions we could search.
+ * However, it seems to be a meaningful performance hit to try to search
+ * them. So let's not. */
+
+ /* save reps for next block */
+ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
+ rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx */
+
+ /* Compute the offset code. */
+ match0 = base + idx;
+ rep_offset2 = rep_offset1;
+ rep_offset1 = (U32)(ip0-match0);
+ offcode = STORE_OFFSET(rep_offset1);
+ mLength = 4;
+
+ /* Count the backwards match length. */
+ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+ ip0--;
+ match0--;
+ mLength++;
+ }
+
+_match: /* Requires: ip0, match0, offcode */
+
+ /* Count the forward length. */
+ mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+ ip0 += mLength;
+ anchor = ip0;
+
+ /* write next hash table entry */
+ if (ip1 < ip0) {
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
+
+ /* Fill table and check for immediate repcode. */
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+ { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ } } }
+
+ goto _start;
+}
+
+#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+ }
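+
+/*
+ * For instance, ZSTD_GEN_FAST_FN(noDict, 4, 1) expands to:
+ *
+ * static size_t ZSTD_compressBlock_fast_noDict_4_1(
+ * ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ * void const* src, size_t srcSize)
+ * {
+ * return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1);
+ * }
+ *
+ * i.e. one statically specialized search function per
+ * (dictMode, minMatch, step-acceleration) combination.
+ */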
+
+ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ZSTD_GEN_FAST_FN(noDict, 7, 1)
+
+ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ZSTD_GEN_FAST_FN(noDict, 7, 0)
+
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState == NULL);
+ if (ms->cParams.targetLength > 1) {
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+ }
+ } else {
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+ }
+
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
+ const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
+ const U32 dictHLog = dictCParams->hashLog;
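+ /* dictIndexDelta translates dictionary table entries into this context's
+ * index space: an index d stored in dms corresponds to local index
+ * d + dictIndexDelta, and repIndex - dictIndexDelta is used to address
+ * dictBase when a rep offset reaches back into the dictionary. */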
+
+ /* if a dictionary is still attached, it necessarily means that
+ * it is within window size. So we just check it. */
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+ assert(endIndex - prefixStartIndex <= maxDistance);
+ (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
+
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+ /* ensure there will be no underflow
+ * when translating a dict index into a local index */
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
+ ip += (dictAndPrefixLength == 0);
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashTable[h] = curr; /* update hash table */
+
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ } else if ( (matchIndex <= prefixStartIndex) ) {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ }
+ } else if (MEM_read32(match) != MEM_read32(ip)) {
+ /* it's not a match, and we're not going to check the dictionary */
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a regular match */
+ U32 const offset = (U32)(ip-match);
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ while (((ip>anchor) & (match>prefixStart))
+ && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ assert(base+curr+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState != NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
+
+ /* switch to "regular" variant if extDict is invalidated due to maxDistance */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+ const U32 matchIndex = hashTable[h];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ hashTable[h] = curr; /* update hash table */
+ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
+
+ if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
+ & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
+ ip += rLength;
+ anchor = ip;
+ } else {
+ if ( (matchIndex < dictStartIndex) ||
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ }
+ { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 const offset = curr - matchIndex;
+ size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = offset; /* update offset history */
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ip += mLength;
+ anchor = ip;
+ } }
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+ZSTD_GEN_FAST_FN(extDict, 4, 0)
+ZSTD_GEN_FAST_FN(extDict, 5, 0)
+ZSTD_GEN_FAST_FN(extDict, 6, 0)
+ZSTD_GEN_FAST_FN(extDict, 7, 0)
+
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
+ }
+}
diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
new file mode 100644
index 000000000000..fddc2f532d21
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+#endif /* ZSTD_FAST_H */
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
new file mode 100644
index 000000000000..0298a01a7504
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.c
@@ -0,0 +1,2102 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+
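+/* The DUBT keeps two U32 slots per position, at bt[2*(idx & btMask)].
+ * ZSTD_updateDUBT() inserts positions lazily: slot 0 links to the previous
+ * candidate with the same hash (like a plain chain) and slot 1 is tagged
+ * ZSTD_DUBT_UNSORTED_MARK. Only when a search actually reaches such a
+ * candidate does ZSTD_insertDUBT1() turn the two slots into the smaller/larger
+ * children of a binary tree sorted by suffix, so positions that are never
+ * searched never pay the full tree-insertion cost. */
+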
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ if (idx != target)
+ DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+ idx, target, ms->window.dictLimit);
+ assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
+ (void)iend;
+
+ assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
+ for ( ; idx < target ; idx++) {
+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
+ U32 const matchIndex = hashTable[h];
+
+ U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+ U32* const sortMarkPtr = nextCandidatePtr + 1;
+
+ DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+ hashTable[h] = idx; /* Update Hash Table */
+ *nextCandidatePtr = matchIndex; /* update BT like a chain */
+ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+ }
+ ms->nextToUpdate = target;
+}
+
+
+/* ZSTD_insertDUBT1() :
+ * sort one already inserted but unsorted position
+ * assumption : curr >= btlow == (curr - btmask)
+ * doesn't fail */
+static void
+ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+ U32 curr, const BYTE* inputEnd,
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
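+ /* sorted-suffix invariant: every candidate examined lies between the current
+ * "smaller" and "larger" fences in sort order, so it is guaranteed to share
+ * at least MIN(commonLengthSmaller, commonLengthLarger) leading bytes with
+ * ip, which the comparisons below can safely skip. */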
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
+ const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowValid = ms->window.lowLimit;
+ U32 const maxDistance = 1U << cParams->windowLog;
+ U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
+
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+ curr, dictLimit, windowLow);
+ assert(curr >= btLow);
+ assert(ip < iend); /* condition for ZSTD_count */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+ /* note : all candidates are now supposed sorted,
+ * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+ * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
+
+ if ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit) /* both in current segment*/
+ || (curr < dictLimit) /* both in extDict */) {
+ const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit)) ?
+ base : dictBase;
+ assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
+ || (curr < dictLimit) );
+ match = mBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* preparation for next read of match[matchLength] */
+ }
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+ curr, matchIndex, (U32)matchLength);
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+ matchIndex, btLow, nextPtr[1]);
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+ matchIndex, btLow, nextPtr[0]);
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+}
+
+
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ size_t bestLength,
+ U32 nbCompares,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+ const U32 * const dictHashTable = dms->hashTable;
+ U32 const hashLog = dmsCParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 dictMatchIndex = dictHashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ U32 const curr = (U32)(ip-base);
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+ U32 const dictLowLimit = dms->window.lowLimit;
+ U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+ U32* const dictBt = dms->chainTable;
+ U32 const btLog = dmsCParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+ (void)dictMode;
+ assert(dictMode == ZSTD_dictMatchState);
+
+ for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
+ U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dictBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dictHighLimit)
+ match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ U32 matchIndex = dictMatchIndex + dictIndexDelta;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ }
+ if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+
+}
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const unsortLimit = MAX(btLow, windowLow);
+
+ U32* nextCandidate = bt + 2*(matchIndex&btMask);
+ U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ U32 nbCompares = 1U << cParams->searchLog;
+ U32 nbCandidates = nbCompares;
+ U32 previousCandidate = 0;
+
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
+ assert(ip <= iend-8); /* required for h calculation */
+ assert(dictMode != ZSTD_dedicatedDictSearch);
+
+ /* reach end of unsorted candidates list */
+ while ( (matchIndex > unsortLimit)
+ && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+ && (nbCandidates > 1) ) {
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+ matchIndex);
+ *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */
+ previousCandidate = matchIndex;
+ matchIndex = *nextCandidate;
+ nextCandidate = bt + 2*(matchIndex&btMask);
+ unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ nbCandidates --;
+ }
+
+ /* nullify last candidate if it's still unsorted
+ * simplification, detrimental to compression ratio, beneficial for speed */
+ if ( (matchIndex > unsortLimit)
+ && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+ matchIndex);
+ *nextCandidate = *unsortedMark = 0;
+ }
+
+ /* batch sort stacked candidates */
+ matchIndex = previousCandidate;
+ while (matchIndex) { /* will end on matchIndex == 0 */
+ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+ U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+ ZSTD_insertDUBT1(ms, matchIndex, iend,
+ nbCandidates, unsortLimit, dictMode);
+ matchIndex = nextCandidateIdx;
+ nbCandidates++;
+ }
+
+ /* find longest match */
+ { size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr + 8 + 1;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ matchIndex = hashTable[h];
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (dictMode == ZSTD_dictMatchState) {
+ nbCompares = 0; /* in addition to avoiding checking any
+ * further in this loop, make sure we
+ * skip checking in the dictionary. */
+ }
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ bestLength = ZSTD_DUBT_findBetterDictMatch(
+ ms, ip, iend,
+ offsetPtr, bestLength, nbCompares,
+ mls, dictMode);
+ }
+
+ assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+ }
+}
+
+
+/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls /* template */,
+ const ZSTD_dictMode_e dictMode)
+{
+ DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateDUBT(ms, ip, iLimit, mls);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+}
+
+/* *********************************
+* Dedicated dict search
+***********************************/
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32* const hashTable = ms->hashTable;
+ U32* const chainTable = ms->chainTable;
+ U32 const chainSize = 1 << ms->cParams.chainLog;
+ U32 idx = ms->nextToUpdate;
+ U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
+ U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const cacheSize = bucketSize - 1;
+ U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
+ U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
+
+ /* We know the hashtable is oversized by a factor of `bucketSize`.
+ * We are going to temporarily pretend `bucketSize == 1`, keeping only a
+ * single entry. We will use the rest of the space to construct a temporary
+ * chaintable.
+ */
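+ /* Concretely: with hashLog reduced by ZSTD_LAZY_DDSS_BUCKET_LOG, the first
+ * (1 << hashLog) entries act as a one-entry-per-hash table and the remaining
+ * (bucketSize-1) << hashLog entries as a temporary chain table. The same
+ * storage is then rewritten as bucketSize-wide buckets whose last slot holds
+ * a packed pointer, (chainStart << 8) | chainLength, into the final DDSS
+ * chain table. */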
+ U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32* const tmpHashTable = hashTable;
+ U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
+ U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
+ U32 hashIdx;
+
+ assert(ms->cParams.chainLog <= 24);
+ assert(ms->cParams.hashLog > ms->cParams.chainLog);
+ assert(idx != 0);
+ assert(tmpMinChain <= minChain);
+
+ /* fill conventional hash table and conventional chain table */
+ for ( ; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
+ if (idx >= tmpMinChain) {
+ tmpChainTable[idx - tmpMinChain] = hashTable[h];
+ }
+ tmpHashTable[h] = idx;
+ }
+
+ /* sort chains into ddss chain table */
+ {
+ U32 chainPos = 0;
+ for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
+ U32 count;
+ U32 countBeyondMinChain = 0;
+ U32 i = tmpHashTable[hashIdx];
+ for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
+ /* skip through the chain to the first position that won't be
+ * in the hash cache bucket */
+ if (i < minChain) {
+ countBeyondMinChain++;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ if (count == cacheSize) {
+ for (count = 0; count < chainLimit;) {
+ if (i < minChain) {
+ if (!i || ++countBeyondMinChain > cacheSize) {
+ /* only allow pulling `cacheSize` number of entries
+ * into the cache or chainTable beyond `minChain`,
+ * to replace the entries pulled out of the
+ * chainTable into the cache. This lets us reach
+ * back further without increasing the total number
+ * of entries in the chainTable, guaranteeing the
+ * DDSS chain table will fit into the space
+ * allocated for the regular one. */
+ break;
+ }
+ }
+ chainTable[chainPos++] = i;
+ count++;
+ if (i < tmpMinChain) {
+ break;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ } else {
+ count = 0;
+ }
+ if (count) {
+ tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
+ } else {
+ tmpHashTable[hashIdx] = 0;
+ }
+ }
+ assert(chainPos <= chainSize); /* I believe this is guaranteed... */
+ }
+
+ /* move chain pointers into the last entry of each hash bucket */
+ for (hashIdx = (1 << hashLog); hashIdx; ) {
+ U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const chainPackedPointer = tmpHashTable[hashIdx];
+ U32 i;
+ for (i = 0; i < cacheSize; i++) {
+ hashTable[bucketIdx + i] = 0;
+ }
+ hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
+ }
+
+ /* fill the buckets of the hash table */
+ for (idx = ms->nextToUpdate; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
+ << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 i;
+ /* Shift hash cache down 1. */
+ for (i = cacheSize - 1; i; i--)
+ hashTable[h + i] = hashTable[h + i - 1];
+ hashTable[h] = idx;
+ }
+
+ ms->nextToUpdate = target;
+}
+
+/* Returns the longest match length found in the dedicated dict search structure.
+ * If none are longer than the argument ml, then ml will be returned.
+ */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+ const ZSTD_matchState_t* const dms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE* const prefixStart, const U32 curr,
+ const U32 dictLimit, const size_t ddsIdx) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+ U32 matchIndex;
+
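+ /* Search order: first probe the up to (bucketSize - 1) positions cached
+ * directly in this hash bucket, then follow the packed chain pointer stored
+ * in the bucket's last slot ((chainStart << 8) | length) for the remaining
+ * attempts. */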
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
+
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
+
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
+
+ if (!matchIndex) {
+ return ml;
+ }
+
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
+
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ return ml;
+}
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
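+
+/* hashTable[h] holds the most recent position whose content hashes to h, and
+ * chainTable[pos & chainMask] holds the previous position with the same hash,
+ * so NEXT_IN_CHAIN() steps one match further back in history. Because the
+ * chain table is indexed by (pos & chainMask), entries older than chainSize
+ * positions get overwritten, which bounds how far back a chain can reach. */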
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls)
+{
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+}
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+ U32 nbAttempts = 1U << cParams->searchLog;
+ size_t ml=4-1;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+ ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+ const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+ ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+ U32 matchIndex;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32* entry = &dms->hashTable[ddsIdx];
+ PREFETCH_L1(entry);
+ }
+
+ /* HC4 match finder */
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+
+ for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ const U32* const dmsChainTable = dms->chainTable;
+ const U32 dmsChainSize = (1 << dms->cParams.chainLog);
+ const U32 dmsChainMask = dmsChainSize - 1;
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+ const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+ matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+ for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= dmsMinChain) break;
+
+ matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+ }
+ }
+
+ return ml;
+}
+
+/* *********************************
+* (SIMD) Row-based matchfinder
+***********************************/
+/* Constants for row-based hash */
+#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
+#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
+#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
+
+#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
+
+typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
+
+/* ZSTD_VecMask_next():
+ * Starting from the LSB, returns the idx of the next non-zero bit.
+ * Basically counting the nb of trailing zeroes.
+ */
+static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+ assert(val != 0);
+# if (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))
+ if (sizeof(size_t) == 4) {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (leastSignificantWord == 0) {
+ return 32 + (U32)__builtin_ctz(mostSignificantWord);
+ } else {
+ return (U32)__builtin_ctz(leastSignificantWord);
+ }
+ } else {
+ return (U32)__builtin_ctzll(val);
+ }
+# else
+ /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count
+ * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer
+ */
+ val = ~val & (val - 1ULL); /* Lowest set bit mask */
+ val = val - ((val >> 1) & 0x5555555555555555);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+# endif
+}
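+/* Illustrative sketch (not part of the original sources): how a ZSTD_VecMask is
+ * typically consumed. Each set bit marks a row entry whose tag matched; the row
+ * matchfinder below repeatedly takes the lowest set bit via ZSTD_VecMask_next()
+ * and clears it with `matches &= matches - 1`. */
+#if 0
+static void ZSTD_VecMask_example(ZSTD_VecMask matches, const U32* row, U32 head, U32 rowMask)
+{
+    for (; matches > 0; matches &= matches - 1) {
+        U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+        U32 const matchIndex = row[matchPos];   /* candidate position to examine */
+        (void)matchIndex;
+    }
+}
+#endif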
+
+/* ZSTD_rotateRight_*():
+ * Rotates a bitfield to the right by "count" bits.
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
+ */
+FORCE_INLINE_TEMPLATE
+U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
+ assert(count < 64);
+ count &= 0x3F; /* for fickle pattern recognition */
+ return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
+ assert(count < 32);
+ count &= 0x1F; /* for fickle pattern recognition */
+ return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
+}
+
+FORCE_INLINE_TEMPLATE
+U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
+ assert(count < 16);
+ count &= 0x0F; /* for fickle pattern recognition */
+ return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+}
+
+/* ZSTD_row_nextIndex():
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
+ * value to reflect the update. The insertion index cycles backwards through
+ * [0, {entries per row}): e.g. with a 16-entry row it yields 15, 14, ..., 1, 0, 15, ...
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
+ U32 const next = (*tagRow - 1) & rowMask;
+ *tagRow = (BYTE)next;
+ return next;
+}
+
+/* ZSTD_isAligned():
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
+ */
+MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+ assert((align & (align - 1)) == 0);
+ return (((size_t)ptr) & (align - 1)) == 0;
+}
+
+/* ZSTD_row_prefetch():
+ * Performs prefetching for the hashTable and tagTable at a given row.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
+ PREFETCH_L1(hashTable + relRow);
+ if (rowLog >= 5) {
+ PREFETCH_L1(hashTable + relRow + 16);
+ /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
+ }
+ PREFETCH_L1(tagTable + relRow);
+ if (rowLog == 6) {
+ PREFETCH_L1(tagTable + relRow + 32);
+ }
+ assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
+}
+
+/* ZSTD_row_fillHashCache():
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
+ U32 idx, const BYTE* const iLimit)
+{
+ U32 const* const hashTable = ms->hashTable;
+ U16 const* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
+
+ for (; idx < lim; ++idx) {
+ U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
+ }
+
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
+}
+
+/* ZSTD_row_nextCachedHash():
+ * Returns the hash of base + idx, and replaces it in the hash cache with the hash of
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ U16 const* tagTable, BYTE const* base,
+ U32 idx, U32 const hashLog,
+ U32 const rowLog, U32 const mls)
+{
+ U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
+ return hash;
+ }
+}
+
+/* ZSTD_row_update_internalImpl():
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ const BYTE* const base = ms->window.base;
+
+ DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
+ for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
+ : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
+ Explicit cast allows us to get exact desired position within each row */
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+
+ assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
+ ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ row[pos] = updateStartIdx;
+ }
+}
+
+/* ZSTD_row_update_internal():
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
+ * Skips sections of long matches as is necessary.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32 idx = ms->nextToUpdate;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ const U32 kSkipThreshold = 384;
+ const U32 kMaxMatchStartPositionsToUpdate = 96;
+ const U32 kMaxMatchEndPositionsToUpdate = 32;
+
+ if (useCache) {
+ /* Only skip positions when using hash cache, i.e.
+ * if we are loading a dict, don't skip anything.
+ * If we decide to skip, then we only update a set number
+ * of positions at the beginning and end of the match.
+ */
+ if (UNLIKELY(target - idx > kSkipThreshold)) {
+ U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
+ ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
+ idx = target - kMaxMatchEndPositionsToUpdate;
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
+ }
+ }
+ assert(target >= idx);
+ ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
+ ms->nextToUpdate = target;
+}
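+/* Example of the skipping behaviour above (illustrative): with kSkipThreshold == 384,
+ * a long match that leaves target - idx == 1000 only inserts positions [idx, idx+96)
+ * and [target-32, target); the ~870 positions in between are never added to the
+ * hash table, trading a little compression ratio for speed on long matches. */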
+
+/* ZSTD_row_update():
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
+ * processing.
+ */
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+ const U32 rowMask = (1u << rowLog) - 1;
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
+
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
+    ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
+}
+
+#if defined(ZSTD_ARCH_X86_SSE2)
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
+{
+ const __m128i comparisonMask = _mm_set1_epi8((char)tag);
+ int matches[4] = {0};
+ int i;
+ assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
+ for (i=0; i<nbChunks; i++) {
+ const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
+ const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
+ matches[i] = _mm_movemask_epi8(equalMask);
+ }
+ if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
+ if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
+ assert(nbChunks == 4);
+ return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
+}
+#endif
+
+/* Returns a ZSTD_VecMask (U64) that has the nth bit set to 1 if the newly-computed "tag" matches
+ * the hash at the nth position in a row of the tagTable.
+ * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
+ * to match up with the actual layout of the entries within the hashTable */
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
+{
+ const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+
+#if defined(ZSTD_ARCH_X86_SSE2)
+
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head);
+
+#else /* SW or NEON-LE */
+
+# if defined(ZSTD_ARCH_ARM_NEON)
+ /* This NEON path only works for little endian - otherwise use SWAR below */
+ if (MEM_isLittleEndian()) {
+ if (rowEntries == 16) {
+ const uint8x16_t chunk = vld1q_u8(src);
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
+ const uint16x8_t t0 = vshlq_n_u16(equalMask, 7);
+ const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14));
+ const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14));
+ const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28));
+ const U16 hi = (U16)vgetq_lane_u8(t3, 8);
+ const U16 lo = (U16)vgetq_lane_u8(t3, 0);
+ return ZSTD_rotateRight_U16((hi << 8) | lo, head);
+ } else if (rowEntries == 32) {
+ const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src);
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
+ const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag));
+ const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag));
+ const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0));
+ const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1));
+ const uint8x8_t t0 = vreinterpret_u8_s8(pack0);
+ const uint8x8_t t1 = vreinterpret_u8_s8(pack1);
+ const uint8x8_t t2 = vsri_n_u8(t1, t0, 2);
+ const uint8x8x2_t t3 = vuzp_u8(t2, t0);
+ const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4);
+ const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0);
+ return ZSTD_rotateRight_U32(matches, head);
+ } else { /* rowEntries == 64 */
+ const uint8x16x4_t chunk = vld4q_u8(src);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
+
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
+ return ZSTD_rotateRight_U64(matches, head);
+ }
+ }
+# endif /* ZSTD_ARCH_ARM_NEON */
+ /* SWAR */
+ { const size_t chunkSize = sizeof(size_t);
+ const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
+ const size_t xFF = ~((size_t)0);
+ const size_t x01 = xFF / 0xFF;
+ const size_t x80 = x01 << 7;
+ const size_t splatChar = tag * x01;
+ ZSTD_VecMask matches = 0;
+ int i = rowEntries - chunkSize;
+ assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
+ if (MEM_isLittleEndian()) { /* runtime check so have two loops */
+ const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= (chunk * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ } else { /* big endian: reverse bits during extraction */
+ const size_t msb = xFF ^ (xFF >> 1);
+ const size_t extractMagic = (msb / 0x1FF) | msb;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ }
+ matches = ~matches;
+ if (rowEntries == 16) {
+ return ZSTD_rotateRight_U16((U16)matches, head);
+ } else if (rowEntries == 32) {
+ return ZSTD_rotateRight_U32((U32)matches, head);
+ } else {
+ return ZSTD_rotateRight_U64((U64)matches, head);
+ }
+ }
+#endif
+}
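+/* Scalar reference for the function above (illustrative only, not part of the
+ * original sources): bit n of the returned mask is set iff the tag stored
+ * (head + n) entries into the row equals `tag`, which is what the SSE2, NEON
+ * and SWAR paths compute. */
+#if 0
+static ZSTD_VecMask ZSTD_row_getMatchMask_scalarRef(const BYTE* tagRow, BYTE tag, U32 head, U32 rowEntries)
+{
+    const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
+    ZSTD_VecMask mask = 0;
+    U32 n;
+    for (n = 0; n < rowEntries; n++) {
+        U32 const pos = (head + n) & (rowEntries - 1);
+        mask |= (ZSTD_VecMask)(src[pos] == tag) << n;
+    }
+    return mask;
+}
+#endif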
+
+/* The high-level approach of the SIMD row based match finder is as follows:
+ * - Figure out where to insert the new entry:
+ * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
+ *   - The hashTable is effectively split into groups or "rows" of 16, 32 or 64 entries of U32, and the
+ *     hash determines which row to insert into.
+ *   - Determine the correct position within the row to insert the entry into. Each row of 16, 32 or 64 entries
+ *     can be considered as a circular buffer with a "head" index that resides in the tagTable.
+ *   - Also insert the "tag" into the equivalent row and position in the tagTable.
+ *     - Note: The tagTable has 17, 33 or 65 1-byte entries per row, due to 16, 32 or 64 tags, and 1 "head" entry.
+ *       The rows are spaced out to occur every 32, 64 or 128 bytes, respectively,
+ *       for alignment/performance reasons, leaving some bytes unused.
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
+ * - Pick the longest match.
+ */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_RowFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode,
+ const U32 rowLog)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32* const hashCache = ms->hashCache;
+ const U32 hashLog = ms->rowHashLog;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 rowEntries = (1U << rowLog);
+ const U32 rowMask = rowEntries - 1;
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ U32 nbAttempts = 1U << cappedSearchLog;
+ size_t ml=4-1;
+
+    /* DMS/DDS variables that may be referenced later */
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+
+ /* Initialize the following variables to satisfy static analyzer */
+ size_t ddsIdx = 0;
+ U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
+ U32 dmsTag = 0;
+ U32* dmsRow = NULL;
+ BYTE* dmsTagRow = NULL;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ { /* Prefetch DDS hashtable entry */
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
+ }
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
+ }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ /* Prefetch DMS rows */
+ U32* const dmsHashTable = dms->hashTable;
+ U16* const dmsTagTable = dms->tagTable;
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
+ dmsRow = dmsHashTable + dmsRelRow;
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
+ }
+
+ /* Update the hashTable and tagTable up to (but not including) ip */
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ { /* Get the hash for ip, compute the appropriate row */
+ U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
+ U32 const head = *tagRow & rowMask;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
+
+ /* Cycle through the matches and prefetch */
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = row[matchPos];
+ assert(numMatches < rowEntries);
+ if (matchIndex < lowLimit)
+ break;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ PREFETCH_L1(base + matchIndex);
+ } else {
+ PREFETCH_L1(dictBase + matchIndex);
+ }
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+ /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
+ in ZSTD_row_update_internal() at the next search. */
+ {
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+ tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
+ row[pos] = ms->nextToUpdate++;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex < curr);
+ assert(matchIndex >= lowLimit);
+
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* Save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* TODO: Measure and potentially add prefetching to DMS */
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+
+ { U32 const head = *dmsTagRow & rowMask;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
+
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = dmsRow[matchPos];
+ if (matchIndex < dmsLowestIndex)
+ break;
+ PREFETCH_L1(dmsBase + matchIndex);
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex >= dmsLowestIndex);
+ assert(matchIndex < curr);
+
+ { const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip))
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+ }
+
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break;
+ }
+ }
+ }
+ }
+ return ml;
+}
+
+
+/*
+ * Generate search functions templated on (dictMode, mls, rowLog).
+ * These functions are outlined for code size & compilation time.
+ * ZSTD_searchMax() dispatches to the correct implementation function.
+ *
+ * TODO: The start of the search function involves loading and calculating a
+ * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * done in an initialization function, and saved somewhere in the match state.
+ * Then we could pass a pointer to the saved state instead of the match state,
+ * and avoid duplicate computations.
+ *
+ * TODO: Move the match re-winding into searchMax. This improves compression
+ * ratio, and unlocks further simplifications with the next TODO.
+ *
+ * TODO: Try moving the repcode search into searchMax. After the re-winding
+ * and repcode search are in searchMax, there is no more logic in the match
+ * finder loop that requires knowledge about the dictMode. So we should be
+ * able to avoid force inlining it, and we can join the extDict loop with
+ * the single segment loop. It should go in searchMax instead of its own
+ * function to avoid having multiple virtual function calls per search.
+ */
+
+#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
+#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
+#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
+
+#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
+
+#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offBasePtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
+ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
+ } \
+
+#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
+ X(dictMode, mls, 4) \
+ X(dictMode, mls, 5) \
+ X(dictMode, mls, 6)
+
+#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
+
+#define ZSTD_FOR_EACH_MLS(X, dictMode) \
+ X(dictMode, 4) \
+ X(dictMode, 5) \
+ X(dictMode, 6)
+
+#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
+ X(__VA_ARGS__, noDict) \
+ X(__VA_ARGS__, extDict) \
+ X(__VA_ARGS__, dictMatchState) \
+ X(__VA_ARGS__, dedicatedDictSearch)
+
+/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
+/* Generate binary Tree search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
+/* Generate hash chain search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
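+/* For readability, one generated instance expands to roughly the following
+ * (illustrative, as produced by GEN_ZSTD_HC_SEARCH_FN(noDict, 4) above): */
+#if 0
+FORCE_NOINLINE size_t ZSTD_HcFindBestMatch_noDict_4(
+        ZSTD_matchState_t* ms,
+        const BYTE* ip, const BYTE* const iLimit,
+        size_t* offsetPtr)
+{
+    assert(MAX(4, MIN(6, ms->cParams.minMatch)) == 4);
+    return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+}
+#endif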
+
+typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
+
+#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ case rowLog: \
+ return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
+
+#define ZSTD_SWITCH_MLS(X, dictMode) \
+ switch (mls) { \
+ ZSTD_FOR_EACH_MLS(X, dictMode) \
+ }
+
+#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
+ case mls: \
+ switch (rowLog) { \
+ ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
+ } \
+ ZSTD_UNREACHABLE; \
+ break;
+
+#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
+ switch (searchMethod) { \
+ case search_hashChain: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
+ break; \
+ case search_binaryTree: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
+ break; \
+ case search_rowHash: \
+ ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
+ break; \
+ } \
+ ZSTD_UNREACHABLE;
+
+/*
+ * Searches for the longest match at @p ip.
+ * Dispatches to the correct implementation function based on the
+ * (searchMethod, dictMode, mls, rowLog). We use switch statements
+ * here instead of using an indirect function call through a function
+ * pointer because after Spectre and Meltdown mitigations, indirect
+ * function calls can be very costly, especially in the kernel.
+ *
+ * NOTE: dictMode and searchMethod should be templated, so those switch
+ * statements should be optimized out. Only the mls & rowLog switches
+ * should be left.
+ *
+ * @param ms The match state.
+ * @param ip The position to search at.
+ * @param iend The end of the input data.
+ * @param[out] offsetPtr Stores the match offset into this pointer.
+ * @param mls The minimum search length, in the range [4, 6].
+ * @param rowLog The row log (if applicable), in the range [4, 6].
+ * @param searchMethod The search method to use (templated).
+ * @param dictMode The dictMode (templated).
+ *
+ * @returns The length of the longest match found, or < mls if no match is found.
+ * If a match is found its offset is stored in @p offsetPtr.
+ */
+FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip,
+ const BYTE* iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ U32 const rowLog,
+ searchMethod_e const searchMethod,
+ ZSTD_dictMode_e const dictMode)
+{
+ if (dictMode == ZSTD_noDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(noDict)
+ } else if (dictMode == ZSTD_extDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(extDict)
+ } else if (dictMode == ZSTD_dictMatchState) {
+ ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
+ } else if (dictMode == ZSTD_dedicatedDictSearch) {
+ ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
+ }
+ ZSTD_UNREACHABLE;
+ return 0;
+}
+
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_lazy_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth,
+ ZSTD_dictMode_e const dictMode)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+
+ U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
+ const int isDMS = dictMode == ZSTD_dictMatchState;
+ const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
+ const int isDxS = isDMS || isDDS;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
+ const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
+ const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = isDxS ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+ if (isDxS) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random ~5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
+ */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offcode=STORE_REPCODE_1;
+ const BYTE* start=ip+1;
+ DEBUGLOG(7, "search baseline (depth 0)");
+
+ /* check repCode */
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+ const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+ }
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, dictMode);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offcode=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
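+        /* Note (descriptive, not in the original source): a candidate found one
+         * position later only replaces the current best if its estimated gain
+         * (match length weighted by 3 or 4, minus the log2 of the offset it would
+         * have to encode) exceeds the current best's gain by a small constant hurdle;
+         * repcode candidates pay no offset penalty, as repcodes are cheapest to encode. */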
+ if (depth>=1)
+ while (ip<ilimit) {
+ DEBUGLOG(7, "search depth 1");
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ DEBUGLOG(7, "search depth 2");
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* NOTE:
+ * Pay attention that `start[-value]` can lead to strange undefined behavior
+ * notably if `value` is unsigned, resulting in a large positive `-value`.
+ */
+ /* catch up */
+ if (STORED_IS_OFFSET(offcode)) {
+ if (dictMode == ZSTD_noDict) {
+ while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
+ && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ }
+ if (isDxS) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ }
+ offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ }
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ if (isDxS) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex = current2 - offset_2;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex :
+ base + repIndex;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( ((ip <= ilimit) & (offset_2>0))
+ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1 ? offset_1 : savedOffset;
+ rep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+}
+
+/* Row-based matchfinder */
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + ms->window.lowLimit;
+ const U32 windowLog = ms->cParams.windowLog;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
+
+ /* init */
+ ip += (ip == prefixStart);
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random ~5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
+ */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offcode=STORE_REPCODE_1;
+ const BYTE* start=ip+1;
+ U32 curr = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
+ const U32 repIndex = (U32)(curr+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
+ & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, ZSTD_extDict);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offcode=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offcode) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offcode) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (STORED_IS_OFFSET(offcode)) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ }
+
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repCurrent = (U32)(ip-base);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
+ const U32 repIndex = repCurrent - offset_2;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
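+/*
+ * The extDict entry points below all instantiate the same template,
+ * ZSTD_compressBlock_lazy_extDict_generic(), and differ only in the search
+ * method (search_hashChain, search_binaryTree or search_rowHash) and the
+ * search depth (0 = greedy, 1 = lazy, 2 = lazy2) passed to it.
+ */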
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+}
+
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+}
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
new file mode 100644
index 000000000000..e5bdf4df8dde
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+
+#include "zstd_compress_internal.h"
+
+/*
+ * Dedicated Dictionary Search Structure bucket log. In the
+ * ZSTD_dedicatedDictSearch mode, the hashTable has
+ * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
+ * one.
+ */
+#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_LAZY_H */
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
new file mode 100644
index 000000000000..dd86fc83e7dd
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_ldm.h"
+
+#include "../common/debug.h"
+#include <linux/xxhash.h>
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_ldm_geartab.h"
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+
+typedef struct {
+ U64 rolling;
+ U64 stopMask;
+} ldmRollingHashState_t;
+
+/* ZSTD_ldm_gear_init():
+ *
+ * Initializes the rolling hash state such that it will honor the
+ * settings in params. */
+static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
+{
+ unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
+ unsigned hashRateLog = params->hashRateLog;
+
+ state->rolling = ~(U32)0;
+
+ /* The choice of the splitting criterion is subject to two conditions:
+ * 1. it has to trigger on average every 2^(hashRateLog) bytes;
+ * 2. ideally, it has to depend on a window of minMatchLength bytes.
+ *
+ * In the gear hash algorithm, bit n depends on the last n bytes;
+ * so in order to obtain a good quality splitting criterion it is
+ * preferable to use bits with high weight.
+ *
+ * To match condition 1 we use a mask with hashRateLog bits set
+ * and, because of the previous remark, we make sure these bits
+ * have the highest possible weight while still respecting
+ * condition 2.
+ */
+ if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
+ state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
+ } else {
+ /* In this degenerate case we simply honor the hash rate. */
+ state->stopMask = ((U64)1 << hashRateLog) - 1;
+ }
+}
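+/*
+ * Illustrative example of the mask construction above: with
+ * minMatchLength = 64 and hashRateLog = 7, maxBitsInMask = 64 and
+ * stopMask = 0x7F << 57, i.e. the 7 highest bits of the 64-bit gear hash.
+ * A split is emitted whenever those bits are all zero, which happens on
+ * average once every 2^7 = 128 bytes.
+ */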
+
+/* ZSTD_ldm_gear_reset()
+ * Feeds [data, data + minMatchLength) into the hash without registering any
+ * splits. This effectively resets the hash state. This is used when skipping
+ * over data, either at the beginning of a block, or skipping sections.
+ */
+static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
+ BYTE const* data, size_t minMatchLength)
+{
+ U64 hash = state->rolling;
+ size_t n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ } while (0)
+ while (n + 3 < minMatchLength) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < minMatchLength) {
+ GEAR_ITER_ONCE();
+ }
+#undef GEAR_ITER_ONCE
+}
+
+/* ZSTD_ldm_gear_feed():
+ *
+ * Registers in the splits array all the split points found in the first
+ * size bytes following the data pointer. This function terminates when
+ * either all the data has been processed or LDM_BATCH_SIZE splits are
+ * present in the splits array.
+ *
+ * Precondition: The splits array must not be full.
+ * Returns: The number of bytes processed. */
+static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
+ BYTE const* data, size_t size,
+ size_t* splits, unsigned* numSplits)
+{
+ size_t n;
+ U64 hash, mask;
+
+ hash = state->rolling;
+ mask = state->stopMask;
+ n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ if (UNLIKELY((hash & mask) == 0)) { \
+ splits[*numSplits] = n; \
+ *numSplits += 1; \
+ if (*numSplits == LDM_BATCH_SIZE) \
+ goto done; \
+ } \
+ } while (0)
+
+ while (n + 3 < size) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < size) {
+ GEAR_ITER_ONCE();
+ }
+
+#undef GEAR_ITER_ONCE
+
+done:
+ state->rolling = hash;
+ return n;
+}
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams)
+{
+ params->windowLog = cParams->windowLog;
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+ if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashLog == 0) {
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
+ if (params->hashRateLog == 0) {
+ params->hashRateLog = params->windowLog < params->hashLog
+ ? 0
+ : params->windowLog - params->hashLog;
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
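+/*
+ * Worked example of the defaults above: with cParams->windowLog = 27 and
+ * every LDM field left at 0, this sets bucketSizeLog = 3,
+ * minMatchLength = 64, hashLog = MAX(ZSTD_HASHLOG_MIN, 27 - 7) = 20
+ * (assuming ZSTD_HASHLOG_MIN <= 20), and hashRateLog = 27 - 20 = 7.
+ */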
+
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+ size_t const ldmHSize = ((size_t)1) << params.hashLog;
+ size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+ size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+ size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
+ return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
+}
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
+ return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
+}
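+/*
+ * For instance, with the default minMatchLength of 64 and a 1 MB chunk
+ * (maxChunkSize = 1 << 20), the bound above is 16384 sequences.
+ */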
+
+/* ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/* ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
+ BYTE* const pOffset = ldmState->bucketOffsets + hash;
+ unsigned const offset = *pOffset;
+
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+
+}
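+/*
+ * Example: with the default bucketSizeLog = 3, each bucket holds 8 entries
+ * and the per-bucket offset cycles 0, 1, ..., 7, 0, ..., so every insertion
+ * overwrites the oldest entry of its bucket.
+ */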
+
+/* ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pMatchBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase)
+{
+ size_t matchLength = 0;
+ while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
+/* ZSTD_ldm_countBackwardsMatch_2segments() :
+ * Returns the number of bytes that match backwards from pMatch,
+ * even with the backwards match spanning 2 different segments.
+ *
+ * On reaching `pMatchBase`, continue counting backwards from `pExtDictEnd`. */
+static size_t ZSTD_ldm_countBackwardsMatch_2segments(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase,
+ const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
+{
+ size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
+ if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
+ /* If backwards match is entirely in the extDict or prefix, immediately return */
+ return matchLength;
+ }
+ DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
+ matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
+ DEBUGLOG(7, "final backwards match length = %zu", matchLength);
+ return matchLength;
+}
+
+/* ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+ void const* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+
+ switch(ms->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* ldmState, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params)
+{
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const istart = ip;
+ ldmRollingHashState_t hashState;
+ size_t* const splits = ldmState->splitIndices;
+ unsigned numSplits;
+
+ DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
+
+ ZSTD_ldm_gear_init(&hashState, params);
+ while (ip < iend) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ if (ip + splits[n] >= istart + minMatchLength) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+ ldmEntry_t entry;
+
+ entry.offset = (U32)(split - base);
+ entry.checksum = (U32)(xxhash >> 32);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ }
+ }
+
+ ip += hashed;
+ }
+}
+
+
+/* ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets ms->nextToUpdate to a position closer to anchor
+ * if it is far away
+ * (after a long match, only update tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
+ U32 const curr = (U32)(anchor - ms->window.base);
+ if (curr > ms->nextToUpdate + 1024) {
+ ms->nextToUpdate =
+ curr - MIN(512, curr - ms->nextToUpdate - 1024);
+ }
+}
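+/*
+ * Example: if anchor has moved 5000 positions past ms->nextToUpdate, the
+ * clamp above sets nextToUpdate to curr - MIN(512, 5000 - 1024) = curr - 512,
+ * so table filling resumes only 512 positions before anchor instead of 5000.
+ */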
+
+static size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ /* LDM parameters */
+ int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const entsPerBucket = 1U << params->bucketSizeLog;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ /* Prefix and extDict parameters */
+ U32 const dictLimit = ldmState->window.dictLimit;
+ U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+ BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+ BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+ BYTE const* const lowPrefixPtr = base + dictLimit;
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ BYTE const* const ilimit = iend - HASH_READ_SIZE;
+ /* Input positions */
+ BYTE const* anchor = istart;
+ BYTE const* ip = istart;
+ /* Rolling hash state */
+ ldmRollingHashState_t hashState;
+ /* Arrays for staged-processing */
+ size_t* const splits = ldmState->splitIndices;
+ ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
+ unsigned numSplits;
+
+ if (srcSize < minMatchLength)
+ return iend - anchor;
+
+ /* Initialize the rolling hash state with the first minMatchLength bytes */
+ ZSTD_ldm_gear_init(&hashState, params);
+ ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
+ ip += minMatchLength;
+
+ while (ip < ilimit) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
+ splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+
+ candidates[n].split = split;
+ candidates[n].hash = hash;
+ candidates[n].checksum = (U32)(xxhash >> 32);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ PREFETCH_L1(candidates[n].bucket);
+ }
+
+ for (n = 0; n < numSplits; n++) {
+ size_t forwardMatchLength = 0, backwardMatchLength = 0,
+ bestMatchLength = 0, mLength;
+ U32 offset;
+ BYTE const* const split = candidates[n].split;
+ U32 const checksum = candidates[n].checksum;
+ U32 const hash = candidates[n].hash;
+ ldmEntry_t* const bucket = candidates[n].bucket;
+ ldmEntry_t const* cur;
+ ldmEntry_t const* bestEntry = NULL;
+ ldmEntry_t newEntry;
+
+ newEntry.offset = (U32)(split - base);
+ newEntry.checksum = checksum;
+
+ /* If a split point would generate a sequence overlapping with
+ * the previous one, we merely register it in the hash table and
+ * move on */
+ if (split < anchor) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+ if (extDict) {
+ BYTE const* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ BYTE const* const pMatch = curMatchBase + cur->offset;
+ BYTE const* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ BYTE const* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+ curForwardMatchLength =
+ ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
+ split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
+ } else { /* !extDict */
+ BYTE const* const pMatch = base + cur->offset;
+ curForwardMatchLength = ZSTD_count(split, pMatch, iend);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
+ ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
+ }
+ curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+
+ /* No match found -- insert an entry into the hash table
+ * and process the next candidate match */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ /* Match found */
+ offset = (U32)(split - base) - bestEntry->offset;
+ mLength = forwardMatchLength + backwardMatchLength;
+ {
+ rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+ /* Out of sequence storage */
+ if (rawSeqStore->size == rawSeqStore->capacity)
+ return ERROR(dstSize_tooSmall);
+ seq->litLength = (U32)(split - backwardMatchLength - anchor);
+ seq->matchLength = (U32)mLength;
+ seq->offset = offset;
+ rawSeqStore->size++;
+ }
+
+ /* Insert the current entry into the hash table --- it must be
+ * done after the previous block to avoid clobbering bestEntry */
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+
+ anchor = split + forwardMatchLength;
+
+ /* If we find a match that ends after the data that we've hashed
+ * then we have a repeating, overlapping, pattern. E.g. all zeros.
+ * If one repetition of the pattern matches our `stopMask` then all
+             * repetitions will. We don't need to insert them all into our table,
+ * only the first one. So skip over overlapping matches.
+ * This is a major speed boost (20x) for compressing a single byte
+ * repeated, when that byte ends up in the table.
+ */
+ if (anchor > ip + hashed) {
+ ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
+ /* Continue the outer loop at anchor (ip + hashed == anchor). */
+ ip = anchor - hashed;
+ break;
+ }
+ }
+
+ ip += hashed;
+ }
+
+ return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ U32 const maxDist = 1U << params->windowLog;
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ size_t const kMaxChunkSize = 1 << 20;
+ size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+ size_t chunk;
+ size_t leftoverSize = 0;
+
+ assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+ /* Check that ZSTD_window_update() has been called for this chunk prior
+ * to passing it to this function.
+ */
+ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+ /* The input could be very large (in zstdmt), so it must be broken up into
+ * chunks to enforce the maximum distance and handle overflow correction.
+ */
+ assert(sequences->pos <= sequences->size);
+ assert(sequences->size <= sequences->capacity);
+ for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+ BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+ size_t const remaining = (size_t)(iend - chunkStart);
+ BYTE const *const chunkEnd =
+ (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+ size_t const chunkSize = chunkEnd - chunkStart;
+ size_t newLeftoverSize;
+ size_t const prevSize = sequences->size;
+
+ assert(chunkStart < iend);
+ /* 1. Perform overflow correction if necessary. */
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
+ U32 const ldmHSize = 1U << params->hashLog;
+ U32 const correction = ZSTD_window_correctOverflow(
+ &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
+ ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+ /* invalidate dictionaries on overflow correction */
+ ldmState->loadedDictEnd = 0;
+ }
+ /* 2. We enforce the maximum offset allowed.
+ *
+ * kMaxChunkSize should be small enough that we don't lose too much of
+ * the window through early invalidation.
+ * TODO: * Test the chunk size.
+         *       * Try invalidation after the sequence generation and test
+         *         the offset against maxDist directly.
+ *
+ * NOTE: Because of dictionaries + sequence splitting we MUST make sure
+ * that any offset used is valid at the END of the sequence, since it may
+ * be split into two sequences. This condition holds when using
+ * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
+ * against maxDist directly, we'll have to carefully handle that case.
+ */
+ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
+ /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+ newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+ ldmState, sequences, params, chunkStart, chunkSize);
+ if (ZSTD_isError(newLeftoverSize))
+ return newLeftoverSize;
+ /* 4. We add the leftover literals from previous iterations to the first
+ * newly generated sequence, or add the `newLeftoverSize` if none are
+ * generated.
+ */
+ /* Prepend the leftover literals from the last call */
+ if (prevSize < sequences->size) {
+ sequences->seq[prevSize].litLength += (U32)leftoverSize;
+ leftoverSize = newLeftoverSize;
+ } else {
+ assert(newLeftoverSize == chunkSize);
+ leftoverSize += chunkSize;
+ }
+ }
+ return 0;
+}
+
+void
+ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
+{
+ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+ if (srcSize <= seq->litLength) {
+ /* Skip past srcSize literals */
+ seq->litLength -= (U32)srcSize;
+ return;
+ }
+ srcSize -= seq->litLength;
+ seq->litLength = 0;
+ if (srcSize < seq->matchLength) {
+ /* Skip past the first srcSize of the match */
+ seq->matchLength -= (U32)srcSize;
+ if (seq->matchLength < minMatch) {
+ /* The match is too short, omit it */
+ if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+ seq[1].litLength += seq[0].matchLength;
+ }
+ rawSeqStore->pos++;
+ }
+ return;
+ }
+ srcSize -= seq->matchLength;
+ seq->matchLength = 0;
+ rawSeqStore->pos++;
+ }
+}
+
+/*
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+ U32 const remaining, U32 const minMatch)
+{
+ rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+ assert(sequence.offset > 0);
+ /* Likely: No partial sequence */
+ if (remaining >= sequence.litLength + sequence.matchLength) {
+ rawSeqStore->pos++;
+ return sequence;
+ }
+ /* Cut the sequence short (offset == 0 ==> rest is literals). */
+ if (remaining <= sequence.litLength) {
+ sequence.offset = 0;
+ } else if (remaining < sequence.litLength + sequence.matchLength) {
+ sequence.matchLength = remaining - sequence.litLength;
+ if (sequence.matchLength < minMatch) {
+ sequence.offset = 0;
+ }
+ }
+ /* Skip past `remaining` bytes for the future sequences. */
+ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+ return sequence;
+}
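+/*
+ * Example: for a stored sequence with litLength = 60 and matchLength = 80,
+ * a call with remaining = 100 and minMatch = 4 trims matchLength to
+ * 100 - 60 = 40 (still >= minMatch, so the offset is kept); the
+ * ZSTD_ldm_skipSequences() call then leaves the remaining 40 bytes of the
+ * match in the store for the next block.
+ */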
+
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ void const* src, size_t srcSize)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ unsigned const minMatch = cParams->minMatch;
+ ZSTD_blockCompressor const blockCompressor =
+ ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ /* Input positions */
+ BYTE const* ip = istart;
+
+ DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+ /* If using opt parser, use LDMs only as candidates rather than always accepting them */
+ if (cParams->strategy >= ZSTD_btopt) {
+ size_t lastLLSize;
+ ms->ldmSeqStore = rawSeqStore;
+ lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
+ ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
+ return lastLLSize;
+ }
+
+ assert(rawSeqStore->pos <= rawSeqStore->size);
+ assert(rawSeqStore->size <= rawSeqStore->capacity);
+ /* Loop through each sequence and apply the block compressor to the literals */
+ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+ int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+
+ assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Run the block compressor */
+ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
+ {
+ size_t const newLitLength =
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
+ ip += sequence.litLength;
+ /* Update the repcodes */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ rep[i] = rep[i-1];
+ rep[0] = sequence.offset;
+ /* Store the sequence */
+ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
+ STORE_OFFSET(sequence.offset),
+ sequence.matchLength);
+ ip += sequence.matchLength;
+ }
+ }
+ /* Fill the tables for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Compress the last literals */
+ return blockCompressor(ms, seqStore, rep, ip, iend - ip);
+}
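+/*
+ * Note on the repcode update in ZSTD_ldm_blockCompress() above: with
+ * ZSTD_REP_NUM == 3, accepting an LDM sequence shifts the history so that,
+ * e.g., rep = {8, 16, 32} becomes {sequence.offset, 8, 16}.
+ */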
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
new file mode 100644
index 000000000000..fbc6a5e88fd7
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+
+#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
+#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* state, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params);
+
+/*
+ * ZSTD_ldm_generateSequences():
+ *
+ * Generates sequences using the long distance match finder. The long-range
+ * matching sequences are written into `sequences` and parse a prefix of the
+ * source. `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ * sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize);
+
+/*
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences. Returns the length of the last literals.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ void const* src, size_t srcSize);
+
+/*
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+ U32 const minMatch);
+
+/* ZSTD_ldm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
+ * Not to be used in conjunction with ZSTD_ldm_skipSequences().
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+
+/* ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables or 0 if LDM is
+ * disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/* ZSTD_ldm_getSeqSpace() :
+ * Return an upper bound on the number of sequences that can be produced by
+ * the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/* ZSTD_ldm_adjustParameters() :
+ * If the params->hashRateLog is not set, set it to its default value based on
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not).
+ *
+ * Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams);
+
+
+#endif /* ZSTD_LDM_H */
diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
new file mode 100644
index 000000000000..647f865be290
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm_geartab.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_GEARTAB_H
+#define ZSTD_LDM_GEARTAB_H
+
+#include "../common/compiler.h" /* UNUSED_ATTR */
+#include "../common/mem.h" /* U64 */
+
+static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = {
+ 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
+ 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
+ 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
+ 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
+ 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
+ 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140,
+ 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e,
+ 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
+ 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
+ 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
+ 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
+ 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
+ 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
+ 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
+ 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
+ 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
+ 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
+ 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
+ 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
+ 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
+ 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
+ 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
+ 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
+ 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
+ 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
+ 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
+ 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
+ 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b,
+ 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
+ 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
+ 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
+ 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
+ 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
+ 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
+ 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
+ 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
+ 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec,
+ 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
+ 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab,
+ 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
+ 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
+ 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
+ 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
+ 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
+ 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
+ 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
+ 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
+ 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
+ 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
+ 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
+ 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c,
+ 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
+ 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
+ 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
+ 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a,
+ 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
+ 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
+ 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
+ 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
+ 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
+ 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
+ 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
+ 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
+ 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
+ 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756,
+ 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
+ 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
+ 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
+ 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
+ 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
+ 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
+ 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
+ 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
+ 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
+ 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
+ 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
+ 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
+ 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
+ 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
+ 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
+ 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
+ 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
+ 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
+ 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
+ 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
+ 0x2b4da14f2613d8f4
+};
+
+#endif /* ZSTD_LDM_GEARTAB_H */
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
new file mode 100644
index 000000000000..fd82acfda62f
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.c
@@ -0,0 +1,1446 @@
+/*
+ * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "hist.h"
+#include "zstd_opt.h"
+
+
+#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
+#define ZSTD_MAX_PRICE (1<<30)
+
+#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
+
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
+
+#if 0 /* approximation at bit level (for tests) */
+# define BITCOST_ACCURACY 0
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy (for tests) */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else /* opt==approx, ultra==accurate */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
+{
+ return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+ U32 const stat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(stat);
+ U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + BITCOST_ACCURACY < 31);
+ return weight;
+}
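+/*
+ * Example (with BITCOST_ACCURACY = 8): ZSTD_fracWeight(5) uses stat = 6,
+ * hb = 2, giving 2*BITCOST_MULTIPLIER + ((6 << 8) >> 2) = 512 + 384 = 896,
+ * i.e. 3.5 on the fixed-point "fractional bits" scale.
+ */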
+
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+ return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
+
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+ return optPtr->literalCompressionMode != ZSTD_ps_disable;
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+ optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+ optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
+}
+
+
+static U32 sum_u32(const unsigned table[], size_t nbElts)
+{
+ size_t n;
+ U32 total = 0;
+ for (n=0; n<nbElts; n++) {
+ total += table[n];
+ }
+ return total;
+}
+
+static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
+{
+ U32 s, sum=0;
+ DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
+ assert(shift < 30);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] = 1 + (table[s] >> shift);
+ sum += table[s];
+ }
+ return sum;
+}
+
+/* ZSTD_scaleStats() :
+ * reduce all elements in table if the sum is too large;
+ * return the resulting sum of elements */
+static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+{
+ U32 const prevsum = sum_u32(table, lastEltIndex+1);
+ U32 const factor = prevsum >> logTarget;
+ DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
+ assert(logTarget < 30);
+ if (factor <= 1) return prevsum;
+ return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
+}
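+/*
+ * Example: if the table currently sums to 9000 and logTarget = 11
+ * (target ~2048), then factor = 9000 >> 11 = 4 and the stats are downscaled
+ * with shift = ZSTD_highbit32(4) = 2, i.e. each entry becomes 1 + (entry >> 2).
+ */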
+
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ * take hints from dictionary if there is one
+ * and init from zero if there is none,
+ * using src for literals stats, and baseline stats for sequence symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+ const BYTE* const src, size_t const srcSize,
+ int const optLevel)
+{
+ int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
+ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+ optPtr->priceType = zop_dynamic;
+
+ if (optPtr->litLengthSum == 0) { /* first block : init */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
+ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ optPtr->priceType = zop_predef;
+ }
+
+ assert(optPtr->symbolCosts != NULL);
+ if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+ /* huffman table presumed generated by dictionary */
+ optPtr->priceType = zop_dynamic;
+
+ if (compressedLiterals) {
+ unsigned lit;
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
+ for (lit=0; lit<=MaxLit; lit++) {
+ U32 const scaleLog = 11; /* scale to 2K */
+ U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
+ assert(bitCost <= scaleLog);
+ optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litSum += optPtr->litFreq[lit];
+ } }
+
+ { unsigned ll;
+ FSE_CState_t llstate;
+ FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+ optPtr->litLengthSum = 0;
+ for (ll=0; ll<=MaxLL; ll++) {
+ U32 const scaleLog = 10; /* scale to 1K */
+ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+ assert(bitCost < scaleLog);
+ optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+ } }
+
+ { unsigned ml;
+ FSE_CState_t mlstate;
+ FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+ optPtr->matchLengthSum = 0;
+ for (ml=0; ml<=MaxML; ml++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+ assert(bitCost < scaleLog);
+ optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+ } }
+
+ { unsigned of;
+ FSE_CState_t ofstate;
+ FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+ optPtr->offCodeSum = 0;
+ for (of=0; of<=MaxOff; of++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+ assert(bitCost < scaleLog);
+ optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->offCodeSum += optPtr->offCodeFreq[of];
+ } }
+
+ } else { /* not a dictionary */
+
+ assert(optPtr->litFreq != NULL);
+ if (compressedLiterals) {
+ unsigned lit = MaxLit;
+ HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
+ }
+
+ { unsigned const baseLLfreqs[MaxLL+1] = {
+ 4, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
+ optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
+ }
+
+ { unsigned ml;
+ for (ml=0; ml<=MaxML; ml++)
+ optPtr->matchLengthFreq[ml] = 1;
+ }
+ optPtr->matchLengthSum = MaxML+1;
+
+ { unsigned const baseOFCfreqs[MaxOff+1] = {
+ 6, 2, 1, 1, 2, 3, 4, 4,
+ 4, 3, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
+ optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
+ }
+
+
+ }
+
+ } else { /* new block : re-use previous statistics, scaled down */
+
+ if (compressedLiterals)
+ optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
+ optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
+ optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
+ optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
+ }
+
+ ZSTD_setBasePrices(optPtr, optLevel);
+}
+
+/* ZSTD_rawLiteralsCost() :
+ * price of literals (only) in specified segment (which length can be 0).
+ * does not include price of literalLength symbol */
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+{
+ if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+ return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
+
+ if (optPtr->priceType == zop_predef)
+        return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bits per literal - no statistics used */
+
+ /* dynamic statistics */
+ { U32 price = litLength * optPtr->litSumBasePrice;
+ U32 u;
+ for (u=0; u < litLength; u++) {
+ assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
+ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ }
+ return price;
+ }
+}
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
+{
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (optPtr->priceType == zop_predef)
+ return WEIGHT(litLength, optLevel);
+ /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+ * because it isn't representable in the zstd format. So instead just
+ * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
+ * would be all literals.
+ */
+ if (litLength == ZSTD_BLOCKSIZE_MAX)
+ return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
+
+ /* dynamic statistics */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + optPtr->litLengthSumBasePrice
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+ }
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
+ * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
+ */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offcode,
+ U32 const matchLength,
+ const optState_t* const optPtr,
+ int const optLevel)
+{
+ U32 price;
+ U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
+ U32 const mlBase = matchLength - MINMATCH;
+ assert(matchLength >= MINMATCH);
+
+ if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
+ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+
+ /* dynamic statistics */
+ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+ if ((optLevel<2) /*static*/ && offCode >= 20)
+ price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
+
+ /* match Length */
+ { U32 const mlCode = ZSTD_MLcode(mlBase);
+ price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
+ }
+
+    price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
+
+ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+ return price;
+}
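+/*
+ * Example of the long-distance handicap above: at optLevel < 2, a match
+ * whose offset code is 25 pays an extra (25 - 19) * 2 = 12 bits
+ * (12 * BITCOST_MULTIPLIER price units), nudging the parser toward
+ * shorter offsets.
+ */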
+
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
+static void ZSTD_updateStats(optState_t* const optPtr,
+ U32 litLength, const BYTE* literals,
+ U32 offsetCode, U32 matchLength)
+{
+ /* literals */
+ if (ZSTD_compressedLiterals(optPtr)) {
+ U32 u;
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ }
+
+ /* literal Length */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
+ /* offset code : expected to follow storeSeq() numeric representation */
+ { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
+ assert(offCode <= MaxOff);
+ optPtr->offCodeFreq[offCode]++;
+ optPtr->offCodeSum++;
+ }
+
+ /* match Length */
+ { U32 const mlBase = matchLength - MINMATCH;
+ U32 const mlCode = ZSTD_MLcode(mlBase);
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must be at least 4 bytes before end of buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
+
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip)
+{
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+ const BYTE* const base = ms->window.base;
+ U32 idx = *nextToUpdate3;
+ U32 const target = (U32)(ip - base);
+ size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+ assert(hashLog3 > 0);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
+ *nextToUpdate3 = target;
+ return hashTable3[hash3];
+}
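+
+/* Note : ZSTD_insertAndFindFirstIndexHash3() fills the dedicated 3-byte hash table
+ * for every position in [*nextToUpdate3, ip), then returns the previous occupant of
+ * ip's own slot, i.e. the most recent earlier position whose 3 leading bytes hash
+ * to the same value. It is only used by the mls==3 search path, as a cheap source
+ * of candidates for very short (length 3) matches. */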
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/* ZSTD_insertBt1() : add one or multiple positions to tree.
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
+ * @return : nb of positions added */
+static U32 ZSTD_insertBt1(
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ U32 const target,
+ U32 const mls, const int extDict)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 curr = (U32)(ip-base);
+ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ /* windowLow is based on target because
+ * we only need positions that will be in the window at the end of the tree update.
+ */
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
+ U32 matchEndIdx = curr+8+1;
+ size_t bestLength = 8;
+ U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
+
+ assert(curr <= target);
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = curr; /* Update Hash Table */
+
+ assert(windowLow > 0);
+ for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+
+ if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ { U32 positions = 0;
+ if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ assert(matchEndIdx > curr + 8);
+ return MAX(positions, matchEndIdx - (curr + 8));
+ }
+}
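+
+/* Note on the tree layout : each position owns two slots in bt, at 2*(pos & btMask)
+ * and 2*(pos & btMask)+1, holding the roots of its "smaller" and "larger" subtrees.
+ * commonLengthSmaller / commonLengthLarger track a prefix length guaranteed to be
+ * shared with every remaining candidate on that side, so comparisons can resume
+ * past it. The value returned by ZSTD_insertBt1() lets the caller skip ahead after
+ * a long match, which helps avoid quadratic behavior on highly repetitive input. */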
+
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+ DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
+
+ while(idx < target) {
+ U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
+ assert(idx < (U32)(idx + forward));
+ idx += forward;
+ }
+ assert((size_t)(ip - base) <= (size_t)(U32)(-1));
+ assert((size_t)(iend - base) <= (size_t)(U32)(-1));
+ ms->nextToUpdate = target;
+}
+
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ const U32 lengthToBeat,
+ U32 const mls /* template */)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const hashLog = cParams->hashLog;
+ U32 const minMatch = (mls==3) ? 3 : 4;
+ U32* const hashTable = ms->hashTable;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ U32 const dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+ U32 const matchLow = windowLow ? windowLow : 1;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+ U32 nbCompares = 1U << cParams->searchLog;
+
+ const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_compressionParameters* const dmsCParams =
+ dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+ const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+ const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+ U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+ U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+ U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+ U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+ U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+ U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+ U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
+ size_t bestLength = lengthToBeat-1;
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
+
+ /* check repCode */
+ assert(ll0 <= 1); /* necessarily 1 or 0 */
+ { U32 const lastR = ZSTD_REP_NUM + ll0;
+ U32 repCode;
+ for (repCode = ll0; repCode < lastR; repCode++) {
+ U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ U32 const repIndex = curr - repOffset;
+ U32 repLen = 0;
+ assert(curr >= dictLimit);
+ if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
+ /* We must validate the repcode offset because when we're using a dictionary the
+ * valid offset range shrinks when the dictionary goes out of bounds.
+ */
+ if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
+ repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+ }
+ } else { /* repIndex < dictLimit || repIndex >= curr */
+ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+ dmsBase + repIndex - dmsIndexDelta :
+ dictBase + repIndex;
+ assert(curr >= windowLow);
+ if ( dictMode == ZSTD_extDict
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
+ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ }
+ if (dictMode == ZSTD_dictMatchState
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
+ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
+ } }
+ /* save longer solution */
+ if (repLen > bestLength) {
+ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+ repCode, ll0, repOffset, repLen);
+ bestLength = repLen;
+ matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */
+ matches[mnum].len = (U32)repLen;
+ mnum++;
+ if ( (repLen > sufficient_len)
+ | (ip+repLen == iLimit) ) { /* best possible */
+ return mnum;
+ } } } }
+
+ /* HC3 match finder */
+ if ((mls == 3) /*static*/ && (bestLength < mls)) {
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
+ if ((matchIndex3 >= matchLow)
+ & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+ size_t mlen;
+ if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
+ const BYTE* const match = base + matchIndex3;
+ mlen = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex3;
+ mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+ }
+
+ /* save best solution */
+ if (mlen >= mls /* == 3 > bestLength */) {
+ DEBUGLOG(8, "found small match with hlog3, of length %u",
+ (U32)mlen);
+ bestLength = mlen;
+ assert(curr > matchIndex3);
+ assert(mnum==0); /* no prior solution */
+ matches[0].off = STORE_OFFSET(curr - matchIndex3);
+ matches[0].len = (U32)mlen;
+ mnum = 1;
+ if ( (mlen > sufficient_len) |
+ (ip+mlen == iLimit) ) { /* best possible length */
+ ms->nextToUpdate = curr+1; /* skip insertion */
+ return 1;
+ } } }
+ /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+ } /* if (mls == 3) */
+
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ const BYTE* match;
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(curr > matchIndex);
+
+ if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
+ match = base + matchIndex;
+ if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* prepare for match[matchLength] read */
+ }
+
+ if (matchLength > bestLength) {
+ DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ assert(matchEndIdx > matchIndex);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+ break; /* drop, to preserve bt consistency (miss a little bit of compression) */
+ } }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
+ } else {
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+ U32 dictMatchIndex = dms->hashTable[dmsH];
+ const U32* const dmsBt = dms->chainTable;
+ commonLengthSmaller = commonLengthLarger = 0;
+ for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
+ const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dmsBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dmsHighLimit)
+ match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ matchIndex = dictMatchIndex + dmsIndexDelta;
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ } }
+
+ if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
+ if (match[matchLength] < ip[matchLength]) {
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ } } } /* if (dictMode == ZSTD_dictMatchState) */
+
+ assert(matchEndIdx > curr+8);
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ return mnum;
+}
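+
+/* Note : matches[] is filled in strictly increasing length order (an entry is only
+ * appended when it beats the current bestLength), so matches[mnum-1] is always the
+ * longest match found. The optimal parser below relies on this ordering, both to
+ * read the maximum length from the last entry and to seed prices range by range. */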
+
+typedef U32 (*ZSTD_getAllMatchesFn)(
+ ZSTD_match_t*,
+ ZSTD_matchState_t*,
+ U32*,
+ const BYTE*,
+ const BYTE*,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat);
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
+ ZSTD_match_t* matches,
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* ip,
+ const BYTE* const iHighLimit,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat,
+ const ZSTD_dictMode_e dictMode,
+ const U32 mls)
+{
+ assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
+ DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
+ if (ip < ms->window.base + ms->nextToUpdate)
+ return 0; /* skipped area */
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
+ return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
+}
+
+#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
+ static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
+ ZSTD_match_t* matches, \
+ ZSTD_matchState_t* ms, \
+ U32* nextToUpdate3, \
+ const BYTE* ip, \
+ const BYTE* const iHighLimit, \
+ const U32 rep[ZSTD_REP_NUM], \
+ U32 const ll0, \
+ U32 const lengthToBeat) \
+ { \
+ return ZSTD_btGetAllMatches_internal( \
+ matches, ms, nextToUpdate3, ip, iHighLimit, \
+ rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
+ }
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
+
+GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
+
+#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
+ { \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
+ }
+
+static ZSTD_getAllMatchesFn
+ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
+{
+ ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
+ };
+ U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
+ assert((U32)dictMode < 3);
+ assert(mls - 3 < 4);
+ return getAllMatchesFns[(int)dictMode][mls - 3];
+}
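+
+/* ZSTD_selectBtGetAllMatches() picks one of 12 pre-instantiated variants
+ * (3 dictModes x 4 mls values), generated by the macros above, so that dictMode
+ * and mls behave as compile-time constants inside the hot match-search loop. */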
+
+/* ***********************
+* LDM helper functions *
+*************************/
+
+/* Struct containing info needed to make decision about ldm inclusion */
+typedef struct {
+ rawSeqStore_t seqStore; /* External match candidates store for this block */
+ U32 startPosInBlock; /* Start position of the current match candidate */
+ U32 endPosInBlock; /* End position of the current match candidate */
+ U32 offset; /* Offset of the match candidate */
+} ZSTD_optLdm_t;
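+
+/* Note : startPosInBlock / endPosInBlock are expressed relative to the beginning of
+ * the current block (same coordinate system as currPosInBlock in the helpers below),
+ * and offset is the raw match offset of the candidate, as produced by the ldm pass. */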
+
+/* ZSTD_optLdm_skipRawSeqStoreBytes():
+ * Moves forward in @rawSeqStore by @nbBytes,
+ * which will update the fields 'pos' and 'posInSequence'.
+ */
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+{
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
+/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
+ * Calculates the beginning and end of the next match in the current block.
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
+ */
+static void
+ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 blockBytesRemaining)
+{
+ rawSeq currSeq;
+ U32 currBlockEndPos;
+ U32 literalsBytesRemaining;
+ U32 matchBytesRemaining;
+
+ /* Setting match end position to MAX to ensure we never use an LDM during this block */
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ return;
+ }
+ /* Calculate appropriate bytes left in matchLength and litLength
+ * after adjusting based on ldmSeqStore->posInSequence */
+ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
+ assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
+ currBlockEndPos = currPosInBlock + blockBytesRemaining;
+ literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
+ currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
+ 0;
+ matchBytesRemaining = (literalsBytesRemaining == 0) ?
+ currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
+ currSeq.matchLength;
+
+ /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
+ if (literalsBytesRemaining >= blockBytesRemaining) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
+ return;
+ }
+
+    /* Matches may end up shorter than MINMATCH after this adjustment. In that case,
+       they are rejected later, when deciding whether or not to add the ldm */
+ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
+ optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
+ optLdm->offset = currSeq.offset;
+
+ if (optLdm->endPosInBlock > currBlockEndPos) {
+ /* Match ends after the block ends, we can't use the whole match */
+ optLdm->endPosInBlock = currBlockEndPos;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
+ } else {
+ /* Consume nb of bytes equal to size of sequence left */
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
+ }
+}
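+
+/* After ZSTD_opt_getNextMatchAndUpdateSeqStore() returns, startPosInBlock and
+ * endPosInBlock delimit the portion of the next ldm candidate that falls inside the
+ * current block (with offset holding its match offset), or are both set to UINT_MAX
+ * when no candidate is usable in this block. */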
+
+/* ZSTD_optLdm_maybeAddMatch():
+ * Adds a match if it's long enough,
+ * based on its 'startPosInBlock' and 'endPosInBlock',
+ * into 'matches'. Maintains the correct ordering of 'matches'.
+ */
+static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+{
+ U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
+ /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+ U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
+
+ /* Ensure that current block position is not outside of the match */
+ if (currPosInBlock < optLdm->startPosInBlock
+ || currPosInBlock >= optLdm->endPosInBlock
+ || candidateMatchLength < MINMATCH) {
+ return;
+ }
+
+ if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+ U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
+ candidateOffCode, candidateMatchLength, currPosInBlock);
+ matches[*nbMatches].len = candidateMatchLength;
+ matches[*nbMatches].off = candidateOffCode;
+ (*nbMatches)++;
+ }
+}
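+
+/* Note : ZSTD_optLdm_maybeAddMatch() only appends the ldm candidate when it is
+ * longer than the longest match already present (and there is room in matches[]),
+ * which preserves the increasing-length ordering the parser expects. */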
+
+/* ZSTD_optLdm_processMatchCandidate():
+ * Wrapper function to update ldm seq store and call ldm functions as necessary.
+ */
+static void
+ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+ ZSTD_match_t* matches, U32* nbMatches,
+ U32 currPosInBlock, U32 remainingBytes)
+{
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ return;
+ }
+
+ if (currPosInBlock >= optLdm->endPosInBlock) {
+ if (currPosInBlock > optLdm->endPosInBlock) {
+ /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
+             * at the end of a match from the ldm seq store, and will often be some bytes
+             * beyond endPosInBlock. As such, we need to correct for these "overshoots"
+ */
+ U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
+ }
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
+ }
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+}
+
+
+/*-*******************************
+* Optimal parser
+*********************************/
+
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
+{
+ return sol.litlen + sol.mlen;
+}
+
+#if 0 /* debug */
+
+static void
+listStats(const U32* table, int lastEltID)
+{
+ int const nbElts = lastEltID + 1;
+ int enb;
+ for (enb=0; enb < nbElts; enb++) {
+ (void)table;
+ /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
+ RAWLOG(2, "%4i,", table[enb]);
+ }
+ RAWLOG(2, " \n");
+}
+
+#endif
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const int optLevel,
+ const ZSTD_dictMode_e dictMode)
+{
+ optState_t* const optStatePtr = &ms->opt;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+
+ ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
+
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+ U32 nextToUpdate3 = ms->nextToUpdate;
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
+ ZSTD_optimal_t lastSequence;
+ ZSTD_optLdm_t optLdm;
+
+ optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
+ optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+ (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+ assert(optLevel <= 2);
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
+ ip += (ip==prefixStart);
+
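+    /* The loop below is a forward dynamic program over relative positions :
+     * opt[pos] stores the cheapest known price to reach ip+pos, together with the
+     * length/offset/litlen of the last step and the repcode history at that point.
+     * Positions are seeded from the matches found at pos==0, then relaxed one
+     * position at a time (literal extension and new matches), and the selected
+     * path is rebuilt backwards at the _shortestPath label. */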
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, last_pos = 0;
+
+ /* find first match */
+ { U32 const litlen = (U32)(ip - anchor);
+ U32 const ll0 = !litlen;
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(ip-istart), (U32)(iend - ip));
+ if (!nbMatches) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 0; /* means is_a_literal */
+ opt[0].litlen = litlen;
+ /* We don't need to include the actual price of the literals because
+ * it is static for the duration of the forward pass, and is included
+ * in every price. We include the literal length to avoid negative
+ * prices when we subtract the previous literal length.
+ */
+ opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
+
+ /* large match -> immediate encoding */
+ { U32 const maxML = matches[nbMatches-1].len;
+ U32 const maxOffcode = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
+
+ if (maxML > sufficient_len) {
+ lastSequence.litlen = litlen;
+ lastSequence.mlen = maxML;
+ lastSequence.off = maxOffcode;
+ DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ maxML, sufficient_len);
+ cur = 0;
+ last_pos = ZSTD_totalLen(lastSequence);
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
+ assert(opt[0].price >= 0);
+ { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 pos;
+ U32 matchNb;
+ for (pos = 1; pos < minMatch; pos++) {
+ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ }
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offcode = matches[matchNb].off;
+ U32 const end = matches[matchNb].len;
+ for ( ; pos <= end ; pos++ ) {
+ U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
+ U32 const sequencePrice = literalsPrice + matchPrice;
+ DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+ pos, ZSTD_fCost(sequencePrice));
+ opt[pos].mlen = pos;
+ opt[pos].off = offcode;
+ opt[pos].litlen = litlen;
+ opt[pos].price = (int)sequencePrice;
+ } }
+ last_pos = pos-1;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+ assert(cur < ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+
+ /* Fix current position with one literal if cheaper */
+ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ int const price = opt[cur-1].price
+ + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+ - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+ opt[cur].mlen = 0;
+ opt[cur].off = 0;
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
+ } else {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+ opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ }
+ }
+
+ /* Set the repcodes of the current position. We must do it here
+ * because we rely on the repcodes of the 2nd to last sequence being
+            * correct to set the next chunk's repcodes during the backward
+ * traversal.
+ */
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ assert(cur >= opt[cur].mlen);
+ if (opt[cur].mlen != 0) {
+ U32 const prev = cur - opt[cur].mlen;
+ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+ } else {
+ ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ }
+
+            /* last match must start at a minimum distance of 8 from iend */
+ if (inr > ilimit) continue;
+
+ if (cur == last_pos) break;
+
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+ DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
+
+ assert(opt[cur].price >= 0);
+ { U32 const ll0 = (opt[cur].mlen != 0);
+ U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+ U32 const previousPrice = (U32)opt[cur].price;
+ U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
+ U32 matchNb;
+
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(inr-istart), (U32)(iend-inr));
+
+ if (!nbMatches) {
+ DEBUGLOG(7, "rPos:%u : no match found", cur);
+ continue;
+ }
+
+ { U32 const maxML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+ inr-istart, cur, nbMatches, maxML);
+
+ if ( (maxML > sufficient_len)
+ || (cur + maxML >= ZSTD_OPT_NUM) ) {
+ lastSequence.mlen = maxML;
+ lastSequence.off = matches[nbMatches-1].off;
+ lastSequence.litlen = litlen;
+                    cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's the first sequence, and it's okay */
+ last_pos = cur + ZSTD_totalLen(lastSequence);
+ if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ goto _shortestPath;
+ } }
+
+ /* set prices using matches found at position == cur */
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const lastML = matches[matchNb].len;
+ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+ U32 mlen;
+
+ DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, litlen);
+
+ for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
+ U32 const pos = cur + mlen;
+ int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = price;
+ } else {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
+ }
+ } } }
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
+ lastSequence = opt[last_pos];
+ cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+ assert(cur < ZSTD_OPT_NUM); /* control overflow*/
+
+_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+ assert(opt[0].mlen == 0);
+
+ /* Set the next chunk's repcodes based on the repcodes of the beginning
+ * of the last match, and the last sequence. This avoids us having to
+ * update them while traversing the sequences.
+ */
+ if (lastSequence.mlen != 0) {
+ repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(reps));
+ } else {
+ ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ }
+
+ { U32 const storeEnd = cur + 1;
+ U32 storeStart = storeEnd;
+ U32 seqPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+ assert(storeEnd < ZSTD_OPT_NUM);
+ DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+ opt[storeEnd] = lastSequence;
+ while (seqPos > 0) {
+ U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ storeStart--;
+ DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+ opt[storeStart] = opt[seqPos];
+ seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ }
+
+ /* save sequences */
+ DEBUGLOG(6, "sending selected sequences into seqStore")
+ { U32 storePos;
+ for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+ U32 const llen = opt[storePos].litlen;
+ U32 const mlen = opt[storePos].mlen;
+ U32 const offCode = opt[storePos].off;
+ U32 const advance = llen + mlen;
+ DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+ if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
+ assert(storePos == storeEnd); /* must be last sequence */
+ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
+ continue; /* will finish */
+ }
+
+ assert(anchor + llen <= iend);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
+ anchor += advance;
+ ip = anchor;
+ } }
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+ } /* while (ip < ilimit) */
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+static size_t ZSTD_compressBlock_opt0(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+}
+
+static size_t ZSTD_compressBlock_opt2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+}
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+
+
+
+/* ZSTD_initStats_ultra():
+ * make a first compression pass, just to seed stats with more accurate starting values.
+ * only works on first block, with no dictionary and no ldm.
+ * this function cannot error, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
+
+ DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+ assert(ms->opt.litLengthSum == 0); /* first block */
+ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
+ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
+    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as two's complement) */
+
+ ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
+
+ /* invalidate first scan from history */
+ ZSTD_resetSeqStore(seqStore);
+ ms->window.base -= srcSize;
+ ms->window.dictLimit += (U32)srcSize;
+ ms->window.lowLimit = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
+
+}
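+
+/* Note : the "invalidate first scan" step above shifts window.base back and raises
+ * dictLimit/lowLimit/nextToUpdate so that nothing indexed during the first pass
+ * remains reachable : the second pass re-parses the block with fresh match history,
+ * while keeping the statistics accumulated in ms->opt. */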
+
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 const curr = (U32)((const BYTE*)src - ms->window.base);
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+ /* 2-pass strategy:
+ * this strategy makes a first pass over first block to collect statistics
+ * and seed next round's statistics with it.
+ * After 1st pass, function forgets everything, and starts a new block.
+ * Consequently, this can only work if no data has been previously loaded in tables,
+ * aka, no dictionary, no prefix, no ldm preprocessing.
+ * The compression ratio gain is generally small (~0.5% on first block),
+ * the cost is 2x cpu time on first block. */
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ if ( (ms->opt.litLengthSum==0) /* first block */
+ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
+ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ ) {
+ ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+ }
+
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+}
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is only intended for the first block (no prefix) */
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
new file mode 100644
index 000000000000..22b862858ba7
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
+
+
+#include "zstd_compress_internal.h"
+
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+  * and is only intended for the first block (no prefix) */
+
+
+#endif /* ZSTD_OPT_H */
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
deleted file mode 100644
index 269ee9a796c1..000000000000
--- a/lib/zstd/decompress.c
+++ /dev/null
@@ -1,2531 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* ***************************************************************
-* Tuning parameters
-*****************************************************************/
-/*!
-* MAXWINDOWSIZE_DEFAULT :
-* maximum window size accepted by DStream, by default.
-* Frames requiring more memory will be rejected.
-*/
-#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
-#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
-#endif
-
-/*-*******************************************************
-* Dependencies
-*********************************************************/
-#include "fse.h"
-#include "huf.h"
-#include "mem.h" /* low level memory routines */
-#include "zstd_internal.h"
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h> /* memcpy, memmove, memset */
-
-#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
-
-/*-*************************************
-* Macros
-***************************************/
-#define ZSTD_isError ERR_isError /* for inlining */
-#define FSE_isError ERR_isError
-#define HUF_isError ERR_isError
-
-/*_*******************************************************
-* Memory operations
-**********************************************************/
-static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
-
-/*-*************************************************************
-* Context management
-***************************************************************/
-typedef enum {
- ZSTDds_getFrameHeaderSize,
- ZSTDds_decodeFrameHeader,
- ZSTDds_decodeBlockHeader,
- ZSTDds_decompressBlock,
- ZSTDds_decompressLastBlock,
- ZSTDds_checkChecksum,
- ZSTDds_decodeSkippableHeader,
- ZSTDds_skipFrame
-} ZSTD_dStage;
-
-typedef struct {
- FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
- FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
- FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
- HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
- U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_entropyTables_t;
-
-struct ZSTD_DCtx_s {
- const FSE_DTable *LLTptr;
- const FSE_DTable *MLTptr;
- const FSE_DTable *OFTptr;
- const HUF_DTable *HUFptr;
- ZSTD_entropyTables_t entropy;
- const void *previousDstEnd; /* detect continuity */
- const void *base; /* start of curr segment */
- const void *vBase; /* virtual start of previous segment if it was just before curr one */
- const void *dictEnd; /* end of previous segment */
- size_t expected;
- ZSTD_frameParams fParams;
- blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
- ZSTD_dStage stage;
- U32 litEntropy;
- U32 fseEntropy;
- struct xxh64_state xxhState;
- size_t headerSize;
- U32 dictID;
- const BYTE *litPtr;
- ZSTD_customMem customMem;
- size_t litSize;
- size_t rleSize;
- BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
- BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
-}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
-
-size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
-
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
-{
- dctx->expected = ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- dctx->previousDstEnd = NULL;
- dctx->base = NULL;
- dctx->vBase = NULL;
- dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- dctx->litEntropy = dctx->fseEntropy = 0;
- dctx->dictID = 0;
- ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
- memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
- dctx->LLTptr = dctx->entropy.LLTable;
- dctx->MLTptr = dctx->entropy.MLTable;
- dctx->OFTptr = dctx->entropy.OFTable;
- dctx->HUFptr = dctx->entropy.hufTable;
- return 0;
-}
-
-ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
-{
- ZSTD_DCtx *dctx;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
- if (!dctx)
- return NULL;
- memcpy(&dctx->customMem, &customMem, sizeof(customMem));
- ZSTD_decompressBegin(dctx);
- return dctx;
-}
-
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createDCtx_advanced(stackMem);
-}
-
-size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
-{
- if (dctx == NULL)
- return 0; /* support free on NULL */
- ZSTD_free(dctx, dctx->customMem);
- return 0; /* reserved as a potential error code in the future */
-}
-
-void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
-{
- size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
- memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
-}
-
-static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
-
-/*-*************************************************************
-* Decompression section
-***************************************************************/
-
-/*! ZSTD_isFrame() :
- * Tells if the content of `buffer` starts with a valid Frame Identifier.
- * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
- * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
- * Note 3 : Skippable Frame Identifiers are considered valid. */
-unsigned ZSTD_isFrame(const void *buffer, size_t size)
-{
- if (size < 4)
- return 0;
- {
- U32 const magic = ZSTD_readLE32(buffer);
- if (magic == ZSTD_MAGICNUMBER)
- return 1;
- if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
- return 1;
- }
- return 0;
-}
-
-/** ZSTD_frameHeaderSize() :
-* srcSize must be >= ZSTD_frameHeaderSize_prefix.
-* @return : size of the Frame Header */
-static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
-{
- if (srcSize < ZSTD_frameHeaderSize_prefix)
- return ERROR(srcSize_wrong);
- {
- BYTE const fhd = ((const BYTE *)src)[4];
- U32 const dictID = fhd & 3;
- U32 const singleSegment = (fhd >> 5) & 1;
- U32 const fcsId = fhd >> 6;
- return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
- }
-}
-
-/** ZSTD_getFrameParams() :
-* decode Frame Header, or require larger `srcSize`.
-* @return : 0, `fparamsPtr` is correctly filled,
-* >0, `srcSize` is too small, result is expected `srcSize`,
-* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
-{
- const BYTE *ip = (const BYTE *)src;
-
- if (srcSize < ZSTD_frameHeaderSize_prefix)
- return ZSTD_frameHeaderSize_prefix;
- if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
- if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- if (srcSize < ZSTD_skippableHeaderSize)
- return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
- memset(fparamsPtr, 0, sizeof(*fparamsPtr));
- fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
- fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
- return 0;
- }
- return ERROR(prefix_unknown);
- }
-
- /* ensure there is enough `srcSize` to fully read/decode frame header */
- {
- size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
- if (srcSize < fhsize)
- return fhsize;
- }
-
- {
- BYTE const fhdByte = ip[4];
- size_t pos = 5;
- U32 const dictIDSizeCode = fhdByte & 3;
- U32 const checksumFlag = (fhdByte >> 2) & 1;
- U32 const singleSegment = (fhdByte >> 5) & 1;
- U32 const fcsID = fhdByte >> 6;
- U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
- U32 windowSize = 0;
- U32 dictID = 0;
- U64 frameContentSize = 0;
- if ((fhdByte & 0x08) != 0)
- return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
- if (!singleSegment) {
- BYTE const wlByte = ip[pos++];
- U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
- if (windowLog > ZSTD_WINDOWLOG_MAX)
- return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
- windowSize = (1U << windowLog);
- windowSize += (windowSize >> 3) * (wlByte & 7);
- }
-
- switch (dictIDSizeCode) {
- default: /* impossible */
- case 0: break;
- case 1:
- dictID = ip[pos];
- pos++;
- break;
- case 2:
- dictID = ZSTD_readLE16(ip + pos);
- pos += 2;
- break;
- case 3:
- dictID = ZSTD_readLE32(ip + pos);
- pos += 4;
- break;
- }
- switch (fcsID) {
- default: /* impossible */
- case 0:
- if (singleSegment)
- frameContentSize = ip[pos];
- break;
- case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
- case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
- case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
- }
- if (!windowSize)
- windowSize = (U32)frameContentSize;
- if (windowSize > windowSizeMax)
- return ERROR(frameParameter_windowTooLarge);
- fparamsPtr->frameContentSize = frameContentSize;
- fparamsPtr->windowSize = windowSize;
- fparamsPtr->dictID = dictID;
- fparamsPtr->checksumFlag = checksumFlag;
- }
- return 0;
-}
-
-/** ZSTD_getFrameContentSize() :
-* compatible with legacy mode
-* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
-* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
-* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
-{
- {
- ZSTD_frameParams fParams;
- if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
- return ZSTD_CONTENTSIZE_ERROR;
- if (fParams.windowSize == 0) {
- /* Either skippable or empty frame, size == 0 either way */
- return 0;
- } else if (fParams.frameContentSize != 0) {
- return fParams.frameContentSize;
- } else {
- return ZSTD_CONTENTSIZE_UNKNOWN;
- }
- }
-}
-
-/** ZSTD_findDecompressedSize() :
- * compatible with legacy mode
- * `srcSize` must be the exact length of some number of ZSTD compressed and/or
- * skippable frames
- * @return : decompressed size of the frames contained */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
-{
- {
- unsigned long long totalDstSize = 0;
- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
- const U32 magicNumber = ZSTD_readLE32(src);
-
- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- size_t skippableSize;
- if (srcSize < ZSTD_skippableHeaderSize)
- return ERROR(srcSize_wrong);
- skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
- if (srcSize < skippableSize) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
- }
-
- {
- unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- if (ret >= ZSTD_CONTENTSIZE_ERROR)
- return ret;
-
- /* check for overflow */
- if (totalDstSize + ret < totalDstSize)
- return ZSTD_CONTENTSIZE_ERROR;
- totalDstSize += ret;
- }
- {
- size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
- if (ZSTD_isError(frameSrcSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- src = (const BYTE *)src + frameSrcSize;
- srcSize -= frameSrcSize;
- }
- }
-
- if (srcSize) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- return totalDstSize;
- }
-}
-
-/** ZSTD_decodeFrameHeader() :
-* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
-* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
-{
- size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
- if (ZSTD_isError(result))
- return result; /* invalid header */
- if (result > 0)
- return ERROR(srcSize_wrong); /* headerSize too small */
- if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
- return ERROR(dictionary_wrong);
- if (dctx->fParams.checksumFlag)
- xxh64_reset(&dctx->xxhState, 0);
- return 0;
-}
-
-typedef struct {
- blockType_e blockType;
- U32 lastBlock;
- U32 origSize;
-} blockProperties_t;
-
-/*! ZSTD_getcBlockSize() :
-* Provides the size of compressed block from block header `src` */
-size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
-{
- if (srcSize < ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
- {
- U32 const cBlockHeader = ZSTD_readLE24(src);
- U32 const cSize = cBlockHeader >> 3;
- bpPtr->lastBlock = cBlockHeader & 1;
- bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
- bpPtr->origSize = cSize; /* only useful for RLE */
- if (bpPtr->blockType == bt_rle)
- return 1;
- if (bpPtr->blockType == bt_reserved)
- return ERROR(corruption_detected);
- return cSize;
- }
-}
-
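As a concrete illustration of the 3-byte block header decoded above (bit 0 = lastBlock, bits 1-2 = blockType, remaining 21 bits = size), a sketch that builds such a header; the helper name is illustrative:

/* Example: example_make_block_header(1, 0, 4) == 0x000021, i.e. a final
 * block of type 0 (a raw block per the format) carrying 4 bytes, which is
 * exactly what the decoding in ZSTD_getcBlockSize() recovers. */
static U32 example_make_block_header(U32 lastBlock, U32 blockType, U32 cSize)
{
	return (cSize << 3) | (blockType << 1) | (lastBlock & 1);
}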
-static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- if (srcSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
- return srcSize;
-}
-
-static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
-{
- if (srcSize != 1)
- return ERROR(srcSize_wrong);
- if (regenSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memset(dst, *(const BYTE *)src, regenSize);
- return regenSize;
-}
-
-/*! ZSTD_decodeLiteralsBlock() :
- @return : nb of bytes read from src (< srcSize ) */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
-{
- if (srcSize < MIN_CBLOCK_SIZE)
- return ERROR(corruption_detected);
-
- {
- const BYTE *const istart = (const BYTE *)src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
-
- switch (litEncType) {
- case set_repeat:
- if (dctx->litEntropy == 0)
- return ERROR(dictionary_corrupted);
- /* fall-through */
- case set_compressed:
- if (srcSize < 5)
- return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
- {
- size_t lhSize, litSize, litCSize;
- U32 singleStream = 0;
- U32 const lhlCode = (istart[0] >> 2) & 3;
- U32 const lhc = ZSTD_readLE32(istart);
- switch (lhlCode) {
- case 0:
- case 1:
- default: /* note : default is impossible, since lhlCode is within [0..3] */
- /* 2 - 2 - 10 - 10 */
- singleStream = !lhlCode;
- lhSize = 3;
- litSize = (lhc >> 4) & 0x3FF;
- litCSize = (lhc >> 14) & 0x3FF;
- break;
- case 2:
- /* 2 - 2 - 14 - 14 */
- lhSize = 4;
- litSize = (lhc >> 4) & 0x3FFF;
- litCSize = lhc >> 18;
- break;
- case 3:
- /* 2 - 2 - 18 - 18 */
- lhSize = 5;
- litSize = (lhc >> 4) & 0x3FFFF;
- litCSize = (lhc >> 22) + (istart[4] << 10);
- break;
- }
- if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(corruption_detected);
- if (litCSize + lhSize > srcSize)
- return ERROR(corruption_detected);
-
- if (HUF_isError(
- (litEncType == set_repeat)
- ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
- : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
- : (singleStream
- ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
- dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
- : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
- dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
- return ERROR(corruption_detected);
-
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- dctx->litEntropy = 1;
- if (litEncType == set_compressed)
- dctx->HUFptr = dctx->entropy.hufTable;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
- return litCSize + lhSize;
- }
-
- case set_basic: {
- size_t litSize, lhSize;
- U32 const lhlCode = ((istart[0]) >> 2) & 3;
- switch (lhlCode) {
- case 0:
- case 2:
- default: /* note : default is impossible, since lhlCode is within [0..3] */
- lhSize = 1;
- litSize = istart[0] >> 3;
- break;
- case 1:
- lhSize = 2;
- litSize = ZSTD_readLE16(istart) >> 4;
- break;
- case 3:
- lhSize = 3;
- litSize = ZSTD_readLE24(istart) >> 4;
- break;
- }
-
- if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
- if (litSize + lhSize > srcSize)
- return ERROR(corruption_detected);
- memcpy(dctx->litBuffer, istart + lhSize, litSize);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
- return lhSize + litSize;
- }
- /* direct reference into compressed stream */
- dctx->litPtr = istart + lhSize;
- dctx->litSize = litSize;
- return lhSize + litSize;
- }
-
- case set_rle: {
- U32 const lhlCode = ((istart[0]) >> 2) & 3;
- size_t litSize, lhSize;
- switch (lhlCode) {
- case 0:
- case 2:
- default: /* note : default is impossible, since lhlCode is within [0..3] */
- lhSize = 1;
- litSize = istart[0] >> 3;
- break;
- case 1:
- lhSize = 2;
- litSize = ZSTD_readLE16(istart) >> 4;
- break;
- case 3:
- lhSize = 3;
- litSize = ZSTD_readLE24(istart) >> 4;
- if (srcSize < 4)
- return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
- break;
- }
- if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(corruption_detected);
- memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- return lhSize + 1;
- }
- default:
- return ERROR(corruption_detected); /* impossible */
- }
- }
-}
-
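For the set_basic and set_rle paths above, the literals-header size depends only on the size-format bits of the first byte; a small sketch of that rule (hypothetical helper name):

/* lhlCode 0 or 2 -> 1-byte header (5-bit size), 1 -> 2 bytes (12-bit size),
 * 3 -> 3 bytes (20-bit size), mirroring the switch in ZSTD_decodeLiteralsBlock(). */
static size_t example_raw_literals_header_size(BYTE firstByte)
{
	U32 const lhlCode = (firstByte >> 2) & 3;

	if (lhlCode == 1)
		return 2;
	if (lhlCode == 3)
		return 3;
	return 1;
}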
-typedef union {
- FSE_decode_t realData;
- U32 alignedBy4;
-} FSE_decode_t4;
-
-static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
- {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 4}}, /* 0 : base, symbol, bits */
- {{16, 0, 4}},
- {{32, 1, 5}},
- {{0, 3, 5}},
- {{0, 4, 5}},
- {{0, 6, 5}},
- {{0, 7, 5}},
- {{0, 9, 5}},
- {{0, 10, 5}},
- {{0, 12, 5}},
- {{0, 14, 6}},
- {{0, 16, 5}},
- {{0, 18, 5}},
- {{0, 19, 5}},
- {{0, 21, 5}},
- {{0, 22, 5}},
- {{0, 24, 5}},
- {{32, 25, 5}},
- {{0, 26, 5}},
- {{0, 27, 6}},
- {{0, 29, 6}},
- {{0, 31, 6}},
- {{32, 0, 4}},
- {{0, 1, 4}},
- {{0, 2, 5}},
- {{32, 4, 5}},
- {{0, 5, 5}},
- {{32, 7, 5}},
- {{0, 8, 5}},
- {{32, 10, 5}},
- {{0, 11, 5}},
- {{0, 13, 6}},
- {{32, 16, 5}},
- {{0, 17, 5}},
- {{32, 19, 5}},
- {{0, 20, 5}},
- {{32, 22, 5}},
- {{0, 23, 5}},
- {{0, 25, 4}},
- {{16, 25, 4}},
- {{32, 26, 5}},
- {{0, 28, 6}},
- {{0, 30, 6}},
- {{48, 0, 4}},
- {{16, 1, 4}},
- {{32, 2, 5}},
- {{32, 3, 5}},
- {{32, 5, 5}},
- {{32, 6, 5}},
- {{32, 8, 5}},
- {{32, 9, 5}},
- {{32, 11, 5}},
- {{32, 12, 5}},
- {{0, 15, 6}},
- {{32, 17, 5}},
- {{32, 18, 5}},
- {{32, 20, 5}},
- {{32, 21, 5}},
- {{32, 23, 5}},
- {{32, 24, 5}},
- {{0, 35, 6}},
- {{0, 34, 6}},
- {{0, 33, 6}},
- {{0, 32, 6}},
-}; /* LL_defaultDTable */
-
-static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
- {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 6}}, /* 0 : base, symbol, bits */
- {{0, 1, 4}},
- {{32, 2, 5}},
- {{0, 3, 5}},
- {{0, 5, 5}},
- {{0, 6, 5}},
- {{0, 8, 5}},
- {{0, 10, 6}},
- {{0, 13, 6}},
- {{0, 16, 6}},
- {{0, 19, 6}},
- {{0, 22, 6}},
- {{0, 25, 6}},
- {{0, 28, 6}},
- {{0, 31, 6}},
- {{0, 33, 6}},
- {{0, 35, 6}},
- {{0, 37, 6}},
- {{0, 39, 6}},
- {{0, 41, 6}},
- {{0, 43, 6}},
- {{0, 45, 6}},
- {{16, 1, 4}},
- {{0, 2, 4}},
- {{32, 3, 5}},
- {{0, 4, 5}},
- {{32, 6, 5}},
- {{0, 7, 5}},
- {{0, 9, 6}},
- {{0, 12, 6}},
- {{0, 15, 6}},
- {{0, 18, 6}},
- {{0, 21, 6}},
- {{0, 24, 6}},
- {{0, 27, 6}},
- {{0, 30, 6}},
- {{0, 32, 6}},
- {{0, 34, 6}},
- {{0, 36, 6}},
- {{0, 38, 6}},
- {{0, 40, 6}},
- {{0, 42, 6}},
- {{0, 44, 6}},
- {{32, 1, 4}},
- {{48, 1, 4}},
- {{16, 2, 4}},
- {{32, 4, 5}},
- {{32, 5, 5}},
- {{32, 7, 5}},
- {{32, 8, 5}},
- {{0, 11, 6}},
- {{0, 14, 6}},
- {{0, 17, 6}},
- {{0, 20, 6}},
- {{0, 23, 6}},
- {{0, 26, 6}},
- {{0, 29, 6}},
- {{0, 52, 6}},
- {{0, 51, 6}},
- {{0, 50, 6}},
- {{0, 49, 6}},
- {{0, 48, 6}},
- {{0, 47, 6}},
- {{0, 46, 6}},
-}; /* ML_defaultDTable */
-
-static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
- {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 5}}, /* 0 : base, symbol, bits */
- {{0, 6, 4}},
- {{0, 9, 5}},
- {{0, 15, 5}},
- {{0, 21, 5}},
- {{0, 3, 5}},
- {{0, 7, 4}},
- {{0, 12, 5}},
- {{0, 18, 5}},
- {{0, 23, 5}},
- {{0, 5, 5}},
- {{0, 8, 4}},
- {{0, 14, 5}},
- {{0, 20, 5}},
- {{0, 2, 5}},
- {{16, 7, 4}},
- {{0, 11, 5}},
- {{0, 17, 5}},
- {{0, 22, 5}},
- {{0, 4, 5}},
- {{16, 8, 4}},
- {{0, 13, 5}},
- {{0, 19, 5}},
- {{0, 1, 5}},
- {{16, 6, 4}},
- {{0, 10, 5}},
- {{0, 16, 5}},
- {{0, 28, 5}},
- {{0, 27, 5}},
- {{0, 26, 5}},
- {{0, 25, 5}},
- {{0, 24, 5}},
-}; /* OF_defaultDTable */
-
-/*! ZSTD_buildSeqTable() :
- @return : number of bytes read from src,
- or an error code if it fails, testable with ZSTD_isError()
-*/
-static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
- size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
-{
- const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
- switch (type) {
- case set_rle:
- if (!srcSize)
- return ERROR(srcSize_wrong);
- if ((*(const BYTE *)src) > max)
- return ERROR(corruption_detected);
- FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
- *DTablePtr = DTableSpace;
- return 1;
- case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
- case set_repeat:
- if (!flagRepeatTable)
- return ERROR(corruption_detected);
- return 0;
- default: /* impossible */
- case set_compressed: {
- U32 tableLog;
- S16 *norm = (S16 *)workspace;
- size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(GENERIC);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
- {
- size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
- if (FSE_isError(headerSize))
- return ERROR(corruption_detected);
- if (tableLog > maxLog)
- return ERROR(corruption_detected);
- FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
- *DTablePtr = DTableSpace;
- return headerSize;
- }
- }
- }
-}
-
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
-{
- const BYTE *const istart = (const BYTE *const)src;
- const BYTE *const iend = istart + srcSize;
- const BYTE *ip = istart;
-
- /* check */
- if (srcSize < MIN_SEQUENCES_SIZE)
- return ERROR(srcSize_wrong);
-
- /* SeqHead */
- {
- int nbSeq = *ip++;
- if (!nbSeq) {
- *nbSeqPtr = 0;
- return 1;
- }
- if (nbSeq > 0x7F) {
- if (nbSeq == 0xFF) {
- if (ip + 2 > iend)
- return ERROR(srcSize_wrong);
- nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
- } else {
- if (ip >= iend)
- return ERROR(srcSize_wrong);
- nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
- }
- }
- *nbSeqPtr = nbSeq;
- }
-
- /* FSE table descriptors */
- if (ip + 4 > iend)
- return ERROR(srcSize_wrong); /* minimum possible size */
- {
- symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
- ip++;
-
- /* Build DTables */
- {
- size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
- LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(llhSize))
- return ERROR(corruption_detected);
- ip += llhSize;
- }
- {
- size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
- OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(ofhSize))
- return ERROR(corruption_detected);
- ip += ofhSize;
- }
- {
- size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
- ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(mlhSize))
- return ERROR(corruption_detected);
- ip += mlhSize;
- }
- }
-
- return ip - istart;
-}
-
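The sequence-count field parsed at the top of ZSTD_decodeSeqHeaders() uses a 1-, 2- or 3-byte variable-length form. A sketch of the matching encoder, assuming LONGNBSEQ from the accompanying zstd headers; the helper name is illustrative:

/* Writes nbSeq in the same variable-length form the decoder above reads.
 * Returns the number of header bytes produced (1, 2 or 3). */
static size_t example_write_nbseq(BYTE *dst, int nbSeq)
{
	if (nbSeq < 0x80) {
		dst[0] = (BYTE)nbSeq;                 /* 1-byte form */
		return 1;
	}
	if (nbSeq < LONGNBSEQ) {
		dst[0] = (BYTE)((nbSeq >> 8) + 0x80); /* 2-byte form */
		dst[1] = (BYTE)(nbSeq & 0xFF);
		return 2;
	}
	dst[0] = 0xFF;                                /* 3-byte form */
	dst[1] = (BYTE)((nbSeq - LONGNBSEQ) & 0xFF);  /* little-endian 16-bit */
	dst[2] = (BYTE)((nbSeq - LONGNBSEQ) >> 8);
	return 3;
}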
-typedef struct {
- size_t litLength;
- size_t matchLength;
- size_t offset;
- const BYTE *match;
-} seq_t;
-
-typedef struct {
- BIT_DStream_t DStream;
- FSE_DState_t stateLL;
- FSE_DState_t stateOffb;
- FSE_DState_t stateML;
- size_t prevOffset[ZSTD_REP_NUM];
- const BYTE *base;
- size_t pos;
- uPtrDiff gotoDict;
-} seqState_t;
-
-FORCE_NOINLINE
-size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = oLitEnd - sequence.offset;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd <= oend_w)
- return ERROR(GENERIC); /* Precondition */
-
- /* copy literals */
- if (op < oend_w) {
- ZSTD_wildcopy(op, *litPtr, oend_w - op);
- *litPtr += oend_w - op;
- op = oend_w;
- }
- while (op < oLitEnd)
- *op++ = *(*litPtr)++;
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- match = dictEnd - (base - match);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- }
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- return sequenceLength;
-}
-
-static seq_t ZSTD_decodeSequence(seqState_t *seqState)
-{
- seq_t seq;
-
- U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
- U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
-
- U32 const llBits = LL_bits[llCode];
- U32 const mlBits = ML_bits[mlCode];
- U32 const ofBits = ofCode;
- U32 const totalBits = llBits + mlBits + ofBits;
-
- static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18,
- 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
-
- static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41,
- 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
-
- static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD,
- 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD,
- 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
-
- /* sequence */
- {
- size_t offset;
- if (!ofCode)
- offset = 0;
- else {
- offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream);
- }
-
- if (ofCode <= 1) {
- offset += (llCode == 0);
- if (offset) {
- size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
- if (offset != 1)
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } else {
- offset = seqState->prevOffset[0];
- }
- } else {
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- }
- seq.offset = offset;
- }
-
- seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() && (mlBits + llBits > 24))
- BIT_reloadDStream(&seqState->DStream);
-
- seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
-
- /* ANS state update */
- FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
- FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
-
- seq.match = NULL;
-
- return seq;
-}
-
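The repeat-offset handling above is compact enough to misread, so here is a worked example, assuming the previous offsets are {8, 16, 32} when the sequence is decoded:

/* - ofCode 0, litLength > 0 : offset = 8 (rep[0]); history stays {8, 16, 32}.
 * - ofCode 0, litLength == 0: offset = 16 (rep[1]); history becomes {16, 8, 32}.
 * - ofCode 1, extra bit = 1, litLength == 0: offset = rep[0] - 1 = 7;
 *   history becomes {7, 8, 16} (the special case handled by `offset == 3` above). */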
-FORCE_INLINE
-size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = oLitEnd - sequence.offset;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd > oend_w)
- return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-
- /* copy Literals */
- ZSTD_copy8(op, *litPtr);
- if (sequence.litLength > 8)
- ZSTD_wildcopy(op + 8, (*litPtr) + 8,
- sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
- op = oLitEnd;
- *litPtr = iLitEnd; /* update for next sequence */
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- match = dictEnd + (match - base);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- if (op > oend_w || sequence.matchLength < MINMATCH) {
- U32 i;
- for (i = 0; i < sequence.matchLength; ++i)
- op[i] = match[i];
- return sequenceLength;
- }
- }
- }
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
- /* match within prefix */
- if (sequence.offset < 8) {
- /* close range match, overlap */
- static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
- static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
- int const sub2 = dec64table[sequence.offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[sequence.offset];
- ZSTD_copy4(op + 4, match);
- match -= sub2;
- } else {
- ZSTD_copy8(op, match);
- }
- op += 8;
- match += 8;
-
- if (oMatchEnd > oend - (16 - MINMATCH)) {
- if (op < oend_w) {
- ZSTD_wildcopy(op, match, oend_w - op);
- match += oend_w - op;
- op = oend_w;
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- } else {
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
- }
- return sequenceLength;
-}
-
-static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
-{
- const BYTE *ip = (const BYTE *)seqStart;
- const BYTE *const iend = ip + seqSize;
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + maxDstSize;
- BYTE *op = ostart;
- const BYTE *litPtr = dctx->litPtr;
- const BYTE *const litEnd = litPtr + dctx->litSize;
- const BYTE *const base = (const BYTE *)(dctx->base);
- const BYTE *const vBase = (const BYTE *)(dctx->vBase);
- const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
- int nbSeq;
-
- /* Build Decoding Tables */
- {
- size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
- if (ZSTD_isError(seqHSize))
- return seqHSize;
- ip += seqHSize;
- }
-
- /* Regen sequences */
- if (nbSeq) {
- seqState_t seqState;
- dctx->fseEntropy = 1;
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- seqState.prevOffset[i] = dctx->entropy.rep[i];
- }
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
- FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
- FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
- FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
- nbSeq--;
- {
- seq_t const sequence = ZSTD_decodeSequence(&seqState);
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- op += oneSeqSize;
- }
- }
-
- /* check if reached exact end */
- if (nbSeq)
- return ERROR(corruption_detected);
- /* save reps for next block */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
- }
- }
-
- /* last literal segment */
- {
- size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend - op))
- return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
- }
-
- return op - ostart;
-}
-
-FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
-{
- seq_t seq;
-
- U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
- U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
-
- U32 const llBits = LL_bits[llCode];
- U32 const mlBits = ML_bits[mlCode];
- U32 const ofBits = ofCode;
- U32 const totalBits = llBits + mlBits + ofBits;
-
- static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18,
- 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
-
- static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41,
- 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
-
- static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD,
- 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD,
- 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
-
- /* sequence */
- {
- size_t offset;
- if (!ofCode)
- offset = 0;
- else {
- if (longOffsets) {
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
- offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
- if (ZSTD_32bits() || extraBits)
- BIT_reloadDStream(&seqState->DStream);
- if (extraBits)
- offset += BIT_readBitsFast(&seqState->DStream, extraBits);
- } else {
- offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream);
- }
- }
-
- if (ofCode <= 1) {
- offset += (llCode == 0);
- if (offset) {
- size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
- if (offset != 1)
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } else {
- offset = seqState->prevOffset[0];
- }
- } else {
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- }
- seq.offset = offset;
- }
-
- seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() && (mlBits + llBits > 24))
- BIT_reloadDStream(&seqState->DStream);
-
- seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
-
- {
- size_t const pos = seqState->pos + seq.litLength;
- seq.match = seqState->base + pos - seq.offset; /* single memory segment */
- if (seq.offset > pos)
- seq.match += seqState->gotoDict; /* separate memory segment */
- seqState->pos = pos + seq.matchLength;
- }
-
- /* ANS state update */
- FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
- FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
-
- return seq;
-}
-
-static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
-{
- if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
- return ZSTD_decodeSequenceLong_generic(seqState, 1);
- } else {
- return ZSTD_decodeSequenceLong_generic(seqState, 0);
- }
-}
-
-FORCE_INLINE
-size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = sequence.match;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd > oend_w)
- return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-
- /* copy Literals */
- ZSTD_copy8(op, *litPtr);
- if (sequence.litLength > 8)
- ZSTD_wildcopy(op + 8, (*litPtr) + 8,
- sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
- op = oLitEnd;
- *litPtr = iLitEnd; /* update for next sequence */
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- if (op > oend_w || sequence.matchLength < MINMATCH) {
- U32 i;
- for (i = 0; i < sequence.matchLength; ++i)
- op[i] = match[i];
- return sequenceLength;
- }
- }
- }
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
- /* match within prefix */
- if (sequence.offset < 8) {
- /* close range match, overlap */
- static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
- static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
- int const sub2 = dec64table[sequence.offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[sequence.offset];
- ZSTD_copy4(op + 4, match);
- match -= sub2;
- } else {
- ZSTD_copy8(op, match);
- }
- op += 8;
- match += 8;
-
- if (oMatchEnd > oend - (16 - MINMATCH)) {
- if (op < oend_w) {
- ZSTD_wildcopy(op, match, oend_w - op);
- match += oend_w - op;
- op = oend_w;
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- } else {
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
- }
- return sequenceLength;
-}
-
-static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
-{
- const BYTE *ip = (const BYTE *)seqStart;
- const BYTE *const iend = ip + seqSize;
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + maxDstSize;
- BYTE *op = ostart;
- const BYTE *litPtr = dctx->litPtr;
- const BYTE *const litEnd = litPtr + dctx->litSize;
- const BYTE *const base = (const BYTE *)(dctx->base);
- const BYTE *const vBase = (const BYTE *)(dctx->vBase);
- const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
- unsigned const windowSize = dctx->fParams.windowSize;
- int nbSeq;
-
- /* Build Decoding Tables */
- {
- size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
- if (ZSTD_isError(seqHSize))
- return seqHSize;
- ip += seqHSize;
- }
-
- /* Regen sequences */
- if (nbSeq) {
-#define STORED_SEQS 4
-#define STOSEQ_MASK (STORED_SEQS - 1)
-#define ADVANCED_SEQS 4
- seq_t *sequences = (seq_t *)dctx->entropy.workspace;
- int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
- seqState_t seqState;
- int seqNb;
- ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
- dctx->fseEntropy = 1;
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- seqState.prevOffset[i] = dctx->entropy.rep[i];
- }
- seqState.base = base;
- seqState.pos = (size_t)(op - base);
- seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
- FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
- FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
- FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
- /* prepare in advance */
- for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
- sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
- }
- if (seqNb < seqAdvance)
- return ERROR(corruption_detected);
-
- /* decode and decompress */
- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
- seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
- size_t const oneSeqSize =
- ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- ZSTD_PREFETCH(sequence.match);
- sequences[seqNb & STOSEQ_MASK] = sequence;
- op += oneSeqSize;
- }
- if (seqNb < nbSeq)
- return ERROR(corruption_detected);
-
- /* finish queue */
- seqNb -= seqAdvance;
- for (; seqNb < nbSeq; seqNb++) {
- size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- op += oneSeqSize;
- }
-
- /* save reps for next block */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
- }
- }
-
- /* last literal segment */
- {
- size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend - op))
- return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
- }
-
- return op - ostart;
-}
-
-static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{ /* blockType == blockCompressed */
- const BYTE *ip = (const BYTE *)src;
-
- if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(srcSize_wrong);
-
- /* Decode literals section */
- {
- size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
- if (ZSTD_isError(litCSize))
- return litCSize;
- ip += litCSize;
- srcSize -= litCSize;
- }
- if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bit x86, as it is detrimental to performance, */
- /* likely because of register pressure. */
- /* If that is the correct cause, then 32-bit ARM should be affected differently; */
- /* it would be good to test this on real ARM hardware, to see whether the prefetch version improves speed. */
- if (dctx->fParams.windowSize > (1 << 23))
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
-}
-
-static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
-{
- if (dst != dctx->previousDstEnd) { /* not contiguous */
- dctx->dictEnd = dctx->previousDstEnd;
- dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
- dctx->base = dst;
- dctx->previousDstEnd = dst;
- }
-}
-
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t dSize;
- ZSTD_checkContinuity(dctx, dst);
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
- dctx->previousDstEnd = (char *)dst + dSize;
- return dSize;
-}
-
-/** ZSTD_insertBlock() :
- insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
-{
- ZSTD_checkContinuity(dctx, blockStart);
- dctx->previousDstEnd = (const char *)blockStart + blockSize;
- return blockSize;
-}
-
-size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
-{
- if (length > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memset(dst, byte, length);
- return length;
-}
-
-/** ZSTD_findFrameCompressedSize() :
- * compatible with legacy mode
- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- * `srcSize` must be at least as large as the frame contained
- * @return : the compressed size of the frame starting at `src` */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
-{
- if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
- } else {
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const ipstart = ip;
- size_t remainingSize = srcSize;
- ZSTD_frameParams fParams;
-
- size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
- if (ZSTD_isError(headerSize))
- return headerSize;
-
- /* Frame Header */
- {
- size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
- if (ZSTD_isError(ret))
- return ret;
- if (ret > 0)
- return ERROR(srcSize_wrong);
- }
-
- ip += headerSize;
- remainingSize -= headerSize;
-
- /* Loop on each block */
- while (1) {
- blockProperties_t blockProperties;
- size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
-
- if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
- return ERROR(srcSize_wrong);
-
- ip += ZSTD_blockHeaderSize + cBlockSize;
- remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
-
- if (blockProperties.lastBlock)
- break;
- }
-
- if (fParams.checksumFlag) { /* Frame content checksum */
- if (remainingSize < 4)
- return ERROR(srcSize_wrong);
- ip += 4;
- remainingSize -= 4;
- }
-
- return ip - ipstart;
- }
-}
-
-/*! ZSTD_decompressFrame() :
-* @dctx must be properly initialized */
-static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
-{
- const BYTE *ip = (const BYTE *)(*srcPtr);
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + dstCapacity;
- BYTE *op = ostart;
- size_t remainingSize = *srcSizePtr;
-
- /* check */
- if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
-
- /* Frame Header */
- {
- size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
- if (ZSTD_isError(frameHeaderSize))
- return frameHeaderSize;
- if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
- CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
- ip += frameHeaderSize;
- remainingSize -= frameHeaderSize;
- }
-
- /* Loop on each block */
- while (1) {
- size_t decodedSize;
- blockProperties_t blockProperties;
- size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
- if (cBlockSize > remainingSize)
- return ERROR(srcSize_wrong);
-
- switch (blockProperties.blockType) {
- case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
- case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
- case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
- case bt_reserved:
- default: return ERROR(corruption_detected);
- }
-
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- if (dctx->fParams.checksumFlag)
- xxh64_update(&dctx->xxhState, op, decodedSize);
- op += decodedSize;
- ip += cBlockSize;
- remainingSize -= cBlockSize;
- if (blockProperties.lastBlock)
- break;
- }
-
- if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
- U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
- U32 checkRead;
- if (remainingSize < 4)
- return ERROR(checksum_wrong);
- checkRead = ZSTD_readLE32(ip);
- if (checkRead != checkCalc)
- return ERROR(checksum_wrong);
- ip += 4;
- remainingSize -= 4;
- }
-
- /* Allow caller to get size read */
- *srcPtr = ip;
- *srcSizePtr = remainingSize;
- return op - ostart;
-}
-
-static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
-
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- const ZSTD_DDict *ddict)
-{
- void *const dststart = dst;
-
- if (ddict) {
- if (dict) {
- /* programmer error, these two cases should be mutually exclusive */
- return ERROR(GENERIC);
- }
-
- dict = ZSTD_DDictDictContent(ddict);
- dictSize = ZSTD_DDictDictSize(ddict);
- }
-
- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
- U32 magicNumber;
-
- magicNumber = ZSTD_readLE32(src);
- if (magicNumber != ZSTD_MAGICNUMBER) {
- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- size_t skippableSize;
- if (srcSize < ZSTD_skippableHeaderSize)
- return ERROR(srcSize_wrong);
- skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
- if (srcSize < skippableSize) {
- return ERROR(srcSize_wrong);
- }
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
- } else {
- return ERROR(prefix_unknown);
- }
- }
-
- if (ddict) {
- /* we were called from ZSTD_decompress_usingDDict */
- ZSTD_refDDict(dctx, ddict);
- } else {
- /* this will initialize correctly with no dict if dict == NULL, so
- * use this in all cases but ddict */
- CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
- }
- ZSTD_checkContinuity(dctx, dst);
-
- {
- const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
- if (ZSTD_isError(res))
- return res;
- /* don't need to bounds check this, ZSTD_decompressFrame will have
- * already */
- dst = (BYTE *)dst + res;
- dstCapacity -= res;
- }
- }
-
- if (srcSize)
- return ERROR(srcSize_wrong); /* input not entirely consumed */
-
- return (BYTE *)dst - (BYTE *)dststart;
-}
-
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
-{
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
-}
-
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
-}
-
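A minimal one-shot usage sketch of the API above. It assumes ZSTD_DCtxWorkspaceBound() and ZSTD_initDCtx() from the accompanying linux/zstd.h and a caller that already knows a sufficient dstCapacity; all other names are illustrative:

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t example_one_shot_decompress(void *dst, size_t dstCapacity,
					   const void *src, size_t srcSize)
{
	size_t const wkspSize = ZSTD_DCtxWorkspaceBound();
	void *wksp = vmalloc(wkspSize);
	ZSTD_DCtx *dctx;
	size_t dSize;

	if (!wksp)
		return -ENOMEM;
	dctx = ZSTD_initDCtx(wksp, wkspSize);
	if (!dctx) {
		vfree(wksp);
		return -EINVAL;
	}
	dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
	vfree(wksp);
	return ZSTD_isError(dSize) ? -EINVAL : (ssize_t)dSize;
}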
-/*-**************************************
-* Advanced Streaming Decompression API
-* Bufferless and synchronous
-****************************************/
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
-
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
-{
- switch (dctx->stage) {
- default: /* should not happen */
- case ZSTDds_getFrameHeaderSize:
- case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
- case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
- case ZSTDds_decompressBlock: return ZSTDnit_block;
- case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
- case ZSTDds_checkChecksum: return ZSTDnit_checksum;
- case ZSTDds_decodeSkippableHeader:
- case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
- }
-}
-
-int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
-
-/** ZSTD_decompressContinue() :
-* @return : number of bytes generated into `dst` (necessarily <= `dstCapacity`)
-* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- /* Sanity check */
- if (srcSize != dctx->expected)
- return ERROR(srcSize_wrong);
- if (dstCapacity)
- ZSTD_checkContinuity(dctx, dst);
-
- switch (dctx->stage) {
- case ZSTDds_getFrameHeaderSize:
- if (srcSize != ZSTD_frameHeaderSize_prefix)
- return ERROR(srcSize_wrong); /* impossible */
- if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
- dctx->stage = ZSTDds_decodeSkippableHeader;
- return 0;
- }
- dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
- if (ZSTD_isError(dctx->headerSize))
- return dctx->headerSize;
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
- dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_decodeFrameHeader;
- return 0;
- }
- dctx->expected = 0; /* not necessary to copy more */
- /* fall through */
-
- case ZSTDds_decodeFrameHeader:
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
- CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
- dctx->expected = ZSTD_blockHeaderSize;
- dctx->stage = ZSTDds_decodeBlockHeader;
- return 0;
-
- case ZSTDds_decodeBlockHeader: {
- blockProperties_t bp;
- size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
- dctx->expected = cBlockSize;
- dctx->bType = bp.blockType;
- dctx->rleSize = bp.origSize;
- if (cBlockSize) {
- dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
- return 0;
- }
- /* empty block */
- if (bp.lastBlock) {
- if (dctx->fParams.checksumFlag) {
- dctx->expected = 4;
- dctx->stage = ZSTDds_checkChecksum;
- } else {
- dctx->expected = 0; /* end of frame */
- dctx->stage = ZSTDds_getFrameHeaderSize;
- }
- } else {
- dctx->expected = 3; /* go directly to next header */
- dctx->stage = ZSTDds_decodeBlockHeader;
- }
- return 0;
- }
- case ZSTDds_decompressLastBlock:
- case ZSTDds_decompressBlock: {
- size_t rSize;
- switch (dctx->bType) {
- case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
- case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
- case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
- case bt_reserved: /* should never happen */
- default: return ERROR(corruption_detected);
- }
- if (ZSTD_isError(rSize))
- return rSize;
- if (dctx->fParams.checksumFlag)
- xxh64_update(&dctx->xxhState, dst, rSize);
-
- if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
- if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
- dctx->expected = 4;
- dctx->stage = ZSTDds_checkChecksum;
- } else {
- dctx->expected = 0; /* ends here */
- dctx->stage = ZSTDds_getFrameHeaderSize;
- }
- } else {
- dctx->stage = ZSTDds_decodeBlockHeader;
- dctx->expected = ZSTD_blockHeaderSize;
- dctx->previousDstEnd = (char *)dst + rSize;
- }
- return rSize;
- }
- case ZSTDds_checkChecksum: {
- U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
- U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
- if (check32 != h32)
- return ERROR(checksum_wrong);
- dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
- }
- case ZSTDds_decodeSkippableHeader: {
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
- dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
- dctx->stage = ZSTDds_skipFrame;
- return 0;
- }
- case ZSTDds_skipFrame: {
- dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
- }
- default:
- return ERROR(GENERIC); /* impossible */
- }
-}
-
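A sketch of the bufferless loop this state machine is designed for: the caller feeds exactly ZSTD_nextSrcSizeToDecompress() bytes per call until it returns 0. It assumes `dctx` was prepared with ZSTD_decompressBegin_usingDict() and that `dst` can hold the whole frame; the helper name is illustrative:

static size_t example_decompress_continue_loop(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
					       const void *src, size_t srcSize)
{
	const BYTE *ip = (const BYTE *)src;
	BYTE *op = (BYTE *)dst;

	while (1) {
		size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
		size_t produced;

		if (toRead == 0)
			break;			/* frame fully decoded */
		if (toRead > srcSize)
			return ERROR(srcSize_wrong);
		produced = ZSTD_decompressContinue(dctx, op, dstCapacity, ip, toRead);
		if (ZSTD_isError(produced))
			return produced;
		ip += toRead;
		srcSize -= toRead;
		op += produced;
		dstCapacity -= produced;
	}
	return (size_t)(op - (BYTE *)dst);
}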
-static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- dctx->dictEnd = dctx->previousDstEnd;
- dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
- dctx->base = dict;
- dctx->previousDstEnd = (const char *)dict + dictSize;
- return 0;
-}
-
-/* ZSTD_loadEntropy() :
- * dict : must point at beginning of a valid zstd dictionary
- * @return : size of entropy tables read */
-static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
-{
- const BYTE *dictPtr = (const BYTE *)dict;
- const BYTE *const dictEnd = dictPtr + dictSize;
-
- if (dictSize <= 8)
- return ERROR(dictionary_corrupted);
- dictPtr += 8; /* skip header = magic + dictID */
-
- {
- size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
- if (HUF_isError(hSize))
- return ERROR(dictionary_corrupted);
- dictPtr += hSize;
- }
-
- {
- short offcodeNCount[MaxOff + 1];
- U32 offcodeMaxValue = MaxOff, offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(offcodeHeaderSize))
- return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += offcodeHeaderSize;
- }
-
- {
- short matchlengthNCount[MaxML + 1];
- unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(matchlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += matchlengthHeaderSize;
- }
-
- {
- short litlengthNCount[MaxLL + 1];
- unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(litlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += litlengthHeaderSize;
- }
-
- if (dictPtr + 12 > dictEnd)
- return ERROR(dictionary_corrupted);
- {
- int i;
- size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
- for (i = 0; i < 3; i++) {
- U32 const rep = ZSTD_readLE32(dictPtr);
- dictPtr += 4;
- if (rep == 0 || rep >= dictContentSize)
- return ERROR(dictionary_corrupted);
- entropy->rep[i] = rep;
- }
- }
-
- return dictPtr - (const BYTE *)dict;
-}
-
-static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- if (dictSize < 8)
- return ZSTD_refDictContent(dctx, dict, dictSize);
- {
- U32 const magic = ZSTD_readLE32(dict);
- if (magic != ZSTD_DICT_MAGIC) {
- return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
- }
- }
- dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
-
- /* load entropy tables */
- {
- size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
- if (ZSTD_isError(eSize))
- return ERROR(dictionary_corrupted);
- dict = (const char *)dict + eSize;
- dictSize -= eSize;
- }
- dctx->litEntropy = dctx->fseEntropy = 1;
-
- /* reference dictionary content */
- return ZSTD_refDictContent(dctx, dict, dictSize);
-}
-
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- CHECK_F(ZSTD_decompressBegin(dctx));
- if (dict && dictSize)
- CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
- return 0;
-}
-
-/* ====== ZSTD_DDict ====== */
-
-struct ZSTD_DDict_s {
- void *dictBuffer;
- const void *dictContent;
- size_t dictSize;
- ZSTD_entropyTables_t entropy;
- U32 dictID;
- U32 entropyPresent;
- ZSTD_customMem cMem;
-}; /* typedef'd to ZSTD_DDict within "zstd.h" */
-
-size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
-
-static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
-
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
-
-static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
-{
- ZSTD_decompressBegin(dstDCtx); /* init */
- if (ddict) { /* support refDDict on NULL */
- dstDCtx->dictID = ddict->dictID;
- dstDCtx->base = ddict->dictContent;
- dstDCtx->vBase = ddict->dictContent;
- dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
- dstDCtx->previousDstEnd = dstDCtx->dictEnd;
- if (ddict->entropyPresent) {
- dstDCtx->litEntropy = 1;
- dstDCtx->fseEntropy = 1;
- dstDCtx->LLTptr = ddict->entropy.LLTable;
- dstDCtx->MLTptr = ddict->entropy.MLTable;
- dstDCtx->OFTptr = ddict->entropy.OFTable;
- dstDCtx->HUFptr = ddict->entropy.hufTable;
- dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
- dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
- dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
- } else {
- dstDCtx->litEntropy = 0;
- dstDCtx->fseEntropy = 0;
- }
- }
-}
-
-static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
-{
- ddict->dictID = 0;
- ddict->entropyPresent = 0;
- if (ddict->dictSize < 8)
- return 0;
- {
- U32 const magic = ZSTD_readLE32(ddict->dictContent);
- if (magic != ZSTD_DICT_MAGIC)
- return 0; /* pure content mode */
- }
- ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
-
- /* load entropy tables */
- CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
- ddict->entropyPresent = 1;
- return 0;
-}
-
-static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
-{
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- {
- ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
- if (!ddict)
- return NULL;
- ddict->cMem = customMem;
-
- if ((byReference) || (!dict) || (!dictSize)) {
- ddict->dictBuffer = NULL;
- ddict->dictContent = dict;
- } else {
- void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
- if (!internalBuffer) {
- ZSTD_freeDDict(ddict);
- return NULL;
- }
- memcpy(internalBuffer, dict, dictSize);
- ddict->dictBuffer = internalBuffer;
- ddict->dictContent = internalBuffer;
- }
- ddict->dictSize = dictSize;
- ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- /* parse dictionary content */
- {
- size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
- if (ZSTD_isError(errorCode)) {
- ZSTD_freeDDict(ddict);
- return NULL;
- }
- }
-
- return ddict;
- }
-}
-
-/*! ZSTD_initDDict() :
-* Create a digested dictionary, to start decompression without startup delay.
-* `dict` content is referenced, not copied (the DDict is created with byReference set).
-* Consequently, `dict` must remain valid for the lifetime of the `ZSTD_DDict` */
-ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
-}
-
-size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
-{
- if (ddict == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = ddict->cMem;
- ZSTD_free(ddict->dictBuffer, cMem);
- ZSTD_free(ddict, cMem);
- return 0;
- }
-}
-
-/*! ZSTD_getDictID_fromDict() :
- * Provides the dictID stored within the dictionary.
- * If @return == 0, the dictionary is not conformant with the Zstandard specification.
- * It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
-{
- if (dictSize < 8)
- return 0;
- if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
- return 0;
- return ZSTD_readLE32((const char *)dict + 4);
-}
-
-/*! ZSTD_getDictID_fromDDict() :
- * Provides the dictID of the dictionary loaded into `ddict`.
- * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
-{
- if (ddict == NULL)
- return 0;
- return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
-}
-
-/*! ZSTD_getDictID_fromFrame() :
- * Provides the dictID required to decompress the frame stored within `src`.
- * If @return == 0, the dictID could not be decoded.
- * This could be for one of the following reasons :
- * - The frame does not require a dictionary to be decoded (most common case).
- * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
- * Note : this use case also happens when using a non-conformant dictionary.
- * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- * - This is not a Zstandard frame.
- * When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
-{
- ZSTD_frameParams zfp = {0, 0, 0, 0};
- size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
- if (ZSTD_isError(hError))
- return 0;
- return zfp.dictID;
-}
-
-/*! ZSTD_decompress_usingDDict() :
-* Decompression using a pre-digested Dictionary
-* Use dictionary without significant overhead. */
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
-{
- /* pass content and size in case legacy frames are encountered */
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
-}
-
-/*=====================================
-* Streaming decompression
-*====================================*/
-
-typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
-
-/* *** Resource management *** */
-struct ZSTD_DStream_s {
- ZSTD_DCtx *dctx;
- ZSTD_DDict *ddictLocal;
- const ZSTD_DDict *ddict;
- ZSTD_frameParams fParams;
- ZSTD_dStreamStage stage;
- char *inBuff;
- size_t inBuffSize;
- size_t inPos;
- size_t maxWindowSize;
- char *outBuff;
- size_t outBuffSize;
- size_t outStart;
- size_t outEnd;
- size_t blockSize;
- BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
- size_t lhSize;
- ZSTD_customMem customMem;
- void *legacyContext;
- U32 previousLegacyVersion;
- U32 legacyVersion;
- U32 hostageByte;
-}; /* typedef'd to ZSTD_DStream within "zstd.h" */
-
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
-{
- size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const inBuffSize = blockSize;
- size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
- return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
-}
-
-static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
-{
- ZSTD_DStream *zds;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
- if (zds == NULL)
- return NULL;
- memset(zds, 0, sizeof(ZSTD_DStream));
- memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
- zds->dctx = ZSTD_createDCtx_advanced(customMem);
- if (zds->dctx == NULL) {
- ZSTD_freeDStream(zds);
- return NULL;
- }
- zds->stage = zdss_init;
- zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
- return zds;
-}
-
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
- if (!zds) {
- return NULL;
- }
-
- zds->maxWindowSize = maxWindowSize;
- zds->stage = zdss_loadHeader;
- zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
- ZSTD_freeDDict(zds->ddictLocal);
- zds->ddictLocal = NULL;
- zds->ddict = zds->ddictLocal;
- zds->legacyVersion = 0;
- zds->hostageByte = 0;
-
- {
- size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
-
- zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
- zds->inBuffSize = blockSize;
- zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
- zds->outBuffSize = neededOutSize;
- if (zds->inBuff == NULL || zds->outBuff == NULL) {
- ZSTD_freeDStream(zds);
- return NULL;
- }
- }
- return zds;
-}
-
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
-{
- ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
- if (zds) {
- zds->ddict = ddict;
- }
- return zds;
-}
-
-size_t ZSTD_freeDStream(ZSTD_DStream *zds)
-{
- if (zds == NULL)
- return 0; /* support free on null */
- {
- ZSTD_customMem const cMem = zds->customMem;
- ZSTD_freeDCtx(zds->dctx);
- zds->dctx = NULL;
- ZSTD_freeDDict(zds->ddictLocal);
- zds->ddictLocal = NULL;
- ZSTD_free(zds->inBuff, cMem);
- zds->inBuff = NULL;
- ZSTD_free(zds->outBuff, cMem);
- zds->outBuff = NULL;
- ZSTD_free(zds, cMem);
- return 0;
- }
-}
-
-/* *** Initialization *** */
-
-size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
-size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
-
-size_t ZSTD_resetDStream(ZSTD_DStream *zds)
-{
- zds->stage = zdss_loadHeader;
- zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
- zds->legacyVersion = 0;
- zds->hostageByte = 0;
- return ZSTD_frameHeaderSize_prefix;
-}
-
-/* ***** Decompression ***** */
-
-ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const length = MIN(dstCapacity, srcSize);
- memcpy(dst, src, length);
- return length;
-}
-
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
-{
- const char *const istart = (const char *)(input->src) + input->pos;
- const char *const iend = (const char *)(input->src) + input->size;
- const char *ip = istart;
- char *const ostart = (char *)(output->dst) + output->pos;
- char *const oend = (char *)(output->dst) + output->size;
- char *op = ostart;
- U32 someMoreWork = 1;
-
- while (someMoreWork) {
- switch (zds->stage) {
- case zdss_init:
- ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
- /* fall-through */
-
- case zdss_loadHeader: {
- size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
- if (ZSTD_isError(hSize))
- return hSize;
- if (hSize != 0) { /* need more input */
- size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
- if (toLoad > (size_t)(iend - ip)) { /* not enough input to load full header */
- memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
- zds->lhSize += iend - ip;
- input->pos = input->size;
- return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
- ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
- }
- memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
- zds->lhSize = hSize;
- ip += toLoad;
- break;
- }
-
- /* check for single-pass mode opportunity */
- if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
- && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
- size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
- if (cSize <= (size_t)(iend - istart)) {
- size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
- if (ZSTD_isError(decompressedSize))
- return decompressedSize;
- ip = istart + cSize;
- op += decompressedSize;
- zds->dctx->expected = 0;
- zds->stage = zdss_init;
- someMoreWork = 0;
- break;
- }
- }
-
- /* Consume header */
- ZSTD_refDDict(zds->dctx, zds->ddict);
- {
- size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
- CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
- {
- size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
- }
- }
-
- zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
- if (zds->fParams.windowSize > zds->maxWindowSize)
- return ERROR(frameParameter_windowTooLarge);
-
- /* Buffers are preallocated, but double check */
- {
- size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
- if (zds->inBuffSize < blockSize) {
- return ERROR(GENERIC);
- }
- if (zds->outBuffSize < neededOutSize) {
- return ERROR(GENERIC);
- }
- zds->blockSize = blockSize;
- }
- zds->stage = zdss_read;
- }
- /* fall through */
-
- case zdss_read: {
- size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- if (neededInSize == 0) { /* end of frame */
- zds->stage = zdss_init;
- someMoreWork = 0;
- break;
- }
- if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
- const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
- size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
- (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- ip += neededInSize;
- if (!decodedSize && !isSkipFrame)
- break; /* this was just a header */
- zds->outEnd = zds->outStart + decodedSize;
- zds->stage = zdss_flush;
- break;
- }
- if (ip == iend) {
- someMoreWork = 0;
- break;
- } /* no more input */
- zds->stage = zdss_load;
- /* pass-through */
- }
- /* fall through */
-
- case zdss_load: {
- size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
- size_t loadedSize;
- if (toLoad > zds->inBuffSize - zds->inPos)
- return ERROR(corruption_detected); /* should never happen */
- loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
- ip += loadedSize;
- zds->inPos += loadedSize;
- if (loadedSize < toLoad) {
- someMoreWork = 0;
- break;
- } /* not enough input, wait for more */
-
- /* decode loaded input */
- {
- const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
- size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
- zds->inBuff, neededInSize);
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- zds->inPos = 0; /* input is consumed */
- if (!decodedSize && !isSkipFrame) {
- zds->stage = zdss_read;
- break;
- } /* this was just a header */
- zds->outEnd = zds->outStart + decodedSize;
- zds->stage = zdss_flush;
- /* pass-through */
- }
- }
- /* fall through */
-
- case zdss_flush: {
- size_t const toFlushSize = zds->outEnd - zds->outStart;
- size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
- op += flushedSize;
- zds->outStart += flushedSize;
- if (flushedSize == toFlushSize) { /* flush completed */
- zds->stage = zdss_read;
- if (zds->outStart + zds->blockSize > zds->outBuffSize)
- zds->outStart = zds->outEnd = 0;
- break;
- }
- /* cannot complete flush */
- someMoreWork = 0;
- break;
- }
- default:
- return ERROR(GENERIC); /* impossible */
- }
- }
-
- /* result */
- input->pos += (size_t)(ip - istart);
- output->pos += (size_t)(op - ostart);
- {
- size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- if (!nextSrcSizeHint) { /* frame fully decoded */
- if (zds->outEnd == zds->outStart) { /* output fully flushed */
- if (zds->hostageByte) {
- if (input->pos >= input->size) {
- zds->stage = zdss_read;
- return 1;
- } /* can't release hostage (not present) */
- input->pos++; /* release hostage */
- }
- return 0;
- }
- if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
- input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
- zds->hostageByte = 1;
- }
- return 1;
- }
- nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
- if (zds->inPos > nextSrcSizeHint)
- return ERROR(GENERIC); /* should never happen */
- nextSrcSizeHint -= zds->inPos; /* already loaded*/
- return nextSrcSizeHint;
- }
-}
-
-EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDCtx);
-EXPORT_SYMBOL(ZSTD_decompressDCtx);
-EXPORT_SYMBOL(ZSTD_decompress_usingDict);
-
-EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDDict);
-EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
-
-EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDStream);
-EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
-EXPORT_SYMBOL(ZSTD_resetDStream);
-EXPORT_SYMBOL(ZSTD_decompressStream);
-EXPORT_SYMBOL(ZSTD_DStreamInSize);
-EXPORT_SYMBOL(ZSTD_DStreamOutSize);
-
-EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
-EXPORT_SYMBOL(ZSTD_getFrameContentSize);
-EXPORT_SYMBOL(ZSTD_findDecompressedSize);
-
-EXPORT_SYMBOL(ZSTD_isFrame);
-EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
-EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
-EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
-
-EXPORT_SYMBOL(ZSTD_getFrameParams);
-EXPORT_SYMBOL(ZSTD_decompressBegin);
-EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
-EXPORT_SYMBOL(ZSTD_copyDCtx);
-EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
-EXPORT_SYMBOL(ZSTD_decompressContinue);
-EXPORT_SYMBOL(ZSTD_nextInputType);
-
-EXPORT_SYMBOL(ZSTD_decompressBlock);
-EXPORT_SYMBOL(ZSTD_insertBlock);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Zstd Decompressor");
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
new file mode 100644
index 000000000000..60958afebc41
--- /dev/null
+++ b/lib/zstd/decompress/huf_decompress.c
@@ -0,0 +1,1740 @@
+/* ******************************************************************
+ * huff0 huffman decoder,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h" /* BIT_* */
+#include "../common/fse.h" /* to compress headers */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+#include "../common/zstd_internal.h"
+
+/* **************************************************************
+* Constants
+****************************************************************/
+
+#define HUF_DECODER_FAST_TABLELOG 11
+
+/* **************************************************************
+* Macros
+****************************************************************/
+
+/* These two optional macros force the use of one or the other of the two
+ * Huffman decompression implementations. You can't force in both directions
+ * at the same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+ defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2
+# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
+#else
+# define HUF_ASM_X86_64_BMI2_ATTRS
+#endif
+
+#define HUF_EXTERN_C
+#define HUF_ASM_DECL HUF_EXTERN_C
+
+#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
+# define HUF_NEED_BMI2_FUNCTION 1
+#else
+# define HUF_NEED_BMI2_FUNCTION 0
+#endif
+
+#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
+# define HUF_NEED_DEFAULT_FUNCTION 1
+#else
+# define HUF_NEED_DEFAULT_FUNCTION 0
+#endif
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+
+
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+
+/* **************************************************************
+* BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn) \
+ \
+ static size_t fn##_default( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ if (bmi2) { \
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#else
+
+#define HUF_DGEN(fn) \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ (void)bmi2; \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#endif
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+ DTableDesc dtd;
+ ZSTD_memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+static size_t HUF_initDStream(BYTE const* ip) {
+ BYTE const lastByte = ip[7];
+ size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ size_t const value = MEM_readLEST(ip) | 1;
+ assert(bitsConsumed <= 8);
+ return value << bitsConsumed;
+}
+typedef struct {
+ BYTE const* ip[4];
+ BYTE* op[4];
+ U64 bits[4];
+ void const* dt;
+ BYTE const* ilimit;
+ BYTE* oend;
+ BYTE const* iend[4];
+} HUF_DecompressAsmArgs;
+
+/*
+ * Initializes args for the asm decoding loop.
+ * @returns 0 on success
+ * 1 if the fallback implementation should be used.
+ * Or an error code on failure.
+ */
+static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
+{
+ void const* dt = DTable + 1;
+ U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
+
+ const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
+
+ BYTE* const oend = (BYTE*)dst + dstSize;
+
+ /* The following condition is false on x32 platform,
+ * but HUF_asm is not compatible with this ABI */
+ if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1;
+
+ /* strict minimum : jump table + 1 byte per stream */
+ if (srcSize < 10)
+ return ERROR(corruption_detected);
+
+ /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
+ * If table log is not correct at this point, fallback to the old decoder.
+ * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
+ */
+ if (dtLog != HUF_DECODER_FAST_TABLELOG)
+ return 1;
+
+ /* Read the jump table. */
+ {
+ const BYTE* const istart = (const BYTE*)src;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
+ args->iend[0] = istart + 6; /* jumpTable */
+ args->iend[1] = args->iend[0] + length1;
+ args->iend[2] = args->iend[1] + length2;
+ args->iend[3] = args->iend[2] + length3;
+
+ /* HUF_initDStream() requires this, and such a small input
+ * won't benefit from the ASM loop anyway.
+ * length1 must be >= 16 so that ip[0] >= ilimit before the loop
+ * starts.
+ */
+ if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
+ return 1;
+ if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
+ }
+ /* ip[] contains the position that is currently loaded into bits[]. */
+ args->ip[0] = args->iend[1] - sizeof(U64);
+ args->ip[1] = args->iend[2] - sizeof(U64);
+ args->ip[2] = args->iend[3] - sizeof(U64);
+ args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
+
+ /* op[] contains the output pointers. */
+ args->op[0] = (BYTE*)dst;
+ args->op[1] = args->op[0] + (dstSize+3)/4;
+ args->op[2] = args->op[1] + (dstSize+3)/4;
+ args->op[3] = args->op[2] + (dstSize+3)/4;
+
+ /* No point to call the ASM loop for tiny outputs. */
+ if (args->op[3] >= oend)
+ return 1;
+
+ /* bits[] is the bit container.
+ * It is read from the MSB down to the LSB.
+ * It is shifted left as it is read, and zeros are
+ * shifted in. After the lowest valid bit a 1 is
+ * set, so that CountTrailingZeros(bits[]) can be used
+ * to count how many bits we've consumed.
+ */
+ args->bits[0] = HUF_initDStream(args->ip[0]);
+ args->bits[1] = HUF_initDStream(args->ip[1]);
+ args->bits[2] = HUF_initDStream(args->ip[2]);
+ args->bits[3] = HUF_initDStream(args->ip[3]);
+
+ /* If ip[] >= ilimit, it is guaranteed to be safe to
+ * reload bits[]. It may be beyond its section, but is
+ * guaranteed to be valid (>= istart).
+ */
+ args->ilimit = ilimit;
+
+ args->oend = oend;
+ args->dt = dt;
+
+ return 0;
+}
+
+static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd)
+{
+ /* Validate that we haven't overwritten. */
+ if (args->op[stream] > segmentEnd)
+ return ERROR(corruption_detected);
+ /* Validate that we haven't read beyond iend[].
+ * Note that ip[] may be < iend[] because the MSB is
+ * the next bit to read, and we may have consumed 100%
+ * of the stream, so down to iend[i] - 8 is valid.
+ */
+ if (args->ip[stream] < args->iend[stream] - 8)
+ return ERROR(corruption_detected);
+
+ /* Construct the BIT_DStream_t. */
+ bit->bitContainer = MEM_readLE64(args->ip[stream]);
+ bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
+ bit->start = (const char*)args->iend[0];
+ bit->limitPtr = bit->start + sizeof(size_t);
+ bit->ptr = (const char*)args->ip[stream];
+
+ return 0;
+}
+#endif
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */
+
+/*
+ * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
+ * a time.
+ */
+static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
+ U64 D4;
+ if (MEM_isLittleEndian()) {
+ D4 = (symbol << 8) + nbBits;
+ } else {
+ D4 = symbol + (nbBits << 8);
+ }
+ D4 *= 0x0001000100010001ULL;
+ return D4;
+}
+
+/*
+ * Increases the tableLog to targetTableLog and rescales the stats.
+ * If tableLog > targetTableLog this is a no-op.
+ * @returns New tableLog
+ */
+static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
+{
+ if (tableLog > targetTableLog)
+ return tableLog;
+ if (tableLog < targetTableLog) {
+ U32 const scale = targetTableLog - tableLog;
+ U32 s;
+ /* Increase the weight for all non-zero probability symbols by scale. */
+ for (s = 0; s < nbSymbols; ++s) {
+ huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
+ }
+ /* Update rankVal to reflect the new weights.
+ * All weights except 0 get moved to weight + scale.
+ * Weights [1, scale] are empty.
+ */
+ for (s = targetTableLog; s > scale; --s) {
+ rankVal[s] = rankVal[s - scale];
+ }
+ for (s = scale; s > 0; --s) {
+ rankVal[s] = 0;
+ }
+ }
+ return targetTableLog;
+}
+
+typedef struct {
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+} HUF_ReadDTableX1_Workspace;
+
+
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+ return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+ HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
+
+ DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
+ if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+ /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
+ if (HUF_isError(iSize)) return iSize;
+
+
+ /* Table header */
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog + 1;
+ U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
+ tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Compute symbols and rankStart given rankVal:
+ *
+ * rankVal already contains the number of values of each weight.
+ *
+ * symbols contains the symbols ordered by weight. First are the rankVal[0]
+ * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
+ * symbols[0] is filled (but unused) to avoid a branch.
+ *
+ * rankStart contains the offset where each rank belongs in the DTable.
+ * rankStart[0] is not filled because there are no entries in the table for
+ * weight 0.
+ */
+ {
+ int n;
+ int nextRankStart = 0;
+ int const unroll = 4;
+ int const nLimit = (int)nbSymbols - unroll + 1;
+ for (n=0; n<(int)tableLog+1; n++) {
+ U32 const curr = nextRankStart;
+ nextRankStart += wksp->rankVal[n];
+ wksp->rankStart[n] = curr;
+ }
+ for (n=0; n < nLimit; n += unroll) {
+ int u;
+ for (u=0; u < unroll; ++u) {
+ size_t const w = wksp->huffWeight[n+u];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
+ }
+ }
+ for (; n < (int)nbSymbols; ++n) {
+ size_t const w = wksp->huffWeight[n];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
+ }
+ }
+
+ /* fill DTable
+ * We fill all entries of each weight in order.
+ * That way length is a constant for each iteration of the outer loop.
+ * We can switch based on the length to a different inner loop which is
+ * optimized for that particular case.
+ */
+ {
+ U32 w;
+ int symbol=wksp->rankVal[0];
+ int rankStart=0;
+ for (w=1; w<tableLog+1; ++w) {
+ int const symbolCount = wksp->rankVal[w];
+ int const length = (1 << w) >> 1;
+ int uStart = rankStart;
+ BYTE const nbBits = (BYTE)(tableLog + 1 - w);
+ int s;
+ int u;
+ switch (length) {
+ case 1:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart] = D;
+ uStart += 1;
+ }
+ break;
+ case 2:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart+0] = D;
+ dt[uStart+1] = D;
+ uStart += 2;
+ }
+ break;
+ case 4:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ uStart += 4;
+ }
+ break;
+ case 8:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ MEM_write64(dt + uStart + 4, D4);
+ uStart += 8;
+ }
+ break;
+ default:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ for (u=0; u < length; u += 16) {
+ MEM_write64(dt + uStart + u + 0, D4);
+ MEM_write64(dt + uStart + u + 4, D4);
+ MEM_write64(dt + uStart + u + 8, D4);
+ MEM_write64(dt + uStart + u + 12, D4);
+ }
+ assert(u == length);
+ uStart += length;
+ }
+ break;
+ }
+ symbol += symbolCount;
+ rankStart += symbolCount * length;
+ }
+ }
+ return iSize;
+}
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+HINT_INLINE size_t
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ if ((pEnd - p) > 3) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
+ }
+
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
+
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - 3;
+ const void* const dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+ U32 endSignal = 1;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit) ; ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ }
+ }
+
+ /* check corruption */
+ /* note : should not be necessary : op# advance in lock step, and we control op4.
+ * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 supposed already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if HUF_NEED_DEFAULT_FUNCTION
+static
+size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+
+static HUF_ASM_X86_64_BMI2_ATTRS
+size_t
+HUF_decompress4X1_usingDTable_internal_bmi2_asm(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ void const* dt = DTable + 1;
+ const BYTE* const iend = (const BYTE*)cSrc + 6;
+ BYTE* const oend = (BYTE*)dst + dstSize;
+ HUF_DecompressAsmArgs args;
+ {
+ size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
+ if (ret != 0)
+ return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+
+ assert(args.ip[0] >= args.ilimit);
+ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);
+
+ /* Our loop guarantees that ip[] >= ilimit and that we haven't
+ * overwritten any op[].
+ */
+ assert(args.ip[0] >= iend);
+ assert(args.ip[1] >= iend);
+ assert(args.ip[2] >= iend);
+ assert(args.ip[3] >= iend);
+ assert(args.op[3] <= oend);
+ (void)iend;
+
+ /* finish bit streams one by one. */
+ {
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ /* Decompress and validate that we've produced exactly the expected length. */
+ args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
+ }
+ }
+
+ /* decoded size */
+ return dstSize;
+}
+#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+
+static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+# else
+ return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+# endif
+ }
+#else
+ (void)bmi2;
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+#else
+ return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+#endif
+}
+
+
+size_t HUF_decompress1X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
+typedef struct { BYTE symbol; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+
+/*
+ * Constructs a HUF_DEltX2 in a U32.
+ */
+static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ U32 seq;
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
+ if (MEM_isLittleEndian()) {
+ seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
+ return seq + (nbBits << 16) + ((U32)level << 24);
+ } else {
+ seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
+ return (seq << 16) + (nbBits << 8) + (U32)level;
+ }
+}
+
+/*
+ * Constructs a HUF_DEltX2.
+ */
+static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ HUF_DEltX2 DElt;
+ U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
+ ZSTD_memcpy(&DElt, &val, sizeof(val));
+ return DElt;
+}
+
+/*
+ * Constructs 2 HUF_DEltX2s and packs them into a U64.
+ */
+static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
+{
+ U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ return (U64)DElt + ((U64)DElt << 32);
+}
+
+/*
+ * Fills the DTable rank with all the symbols from [begin, end) that are each
+ * nbBits long.
+ *
+ * @param DTableRank The start of the rank in the DTable.
+ * @param begin The first symbol to fill (inclusive).
+ * @param end The last symbol to fill (exclusive).
+ * @param nbBits Each symbol is nbBits long.
+ * @param tableLog The table log.
+ * @param baseSeq If level == 1 { 0 } else { the first level symbol }
+ * @param level The level in the table. Must be 1 or 2.
+ */
+static void HUF_fillDTableX2ForWeight(
+ HUF_DEltX2* DTableRank,
+ sortedSymbol_t const* begin, sortedSymbol_t const* end,
+ U32 nbBits, U32 tableLog,
+ U16 baseSeq, int const level)
+{
+ U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
+ const sortedSymbol_t* ptr;
+ assert(level >= 1 && level <= 2);
+ switch (length) {
+ case 1:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ *DTableRank++ = DElt;
+ }
+ break;
+ case 2:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ DTableRank[0] = DElt;
+ DTableRank[1] = DElt;
+ DTableRank += 2;
+ }
+ break;
+ case 4:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ DTableRank += 4;
+ }
+ break;
+ case 8:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ DTableRank += 8;
+ }
+ break;
+ default:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ HUF_DEltX2* const DTableRankEnd = DTableRank + length;
+ for (; DTableRank != DTableRankEnd; DTableRank += 8) {
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ break;
+ }
+}
+
+/* HUF_fillDTableX2Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
+ const U32* rankVal, const int minWeight, const int maxWeight1,
+ const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ /* Fill skipped values (all positions up to rankVal[minWeight]).
+ * These are positions that only get a single symbol because the combined weight
+ * is too large.
+ */
+ if (minWeight>1) {
+ U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
+ U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
+ int const skipSize = rankVal[minWeight];
+ assert(length > 1);
+ assert((U32)skipSize < length);
+ switch (length) {
+ case 2:
+ assert(skipSize == 1);
+ ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
+ break;
+ case 4:
+ assert(skipSize <= 4);
+ ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
+ break;
+ default:
+ {
+ int i;
+ for (i = 0; i < skipSize; i += 8) {
+ ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ }
+ }
+
+ /* Fill each of the second level symbols by weight. */
+ {
+ int w;
+ for (w = minWeight; w < maxWeight1; ++w) {
+ int const begin = rankStart[w];
+ int const end = rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
+ U32 const totalBits = nbBits + consumedBits;
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedSymbols + begin, sortedSymbols + end,
+ totalBits, targetLog,
+ baseSeq, /* level */ 2);
+ }
+ }
+}
+
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList,
+ const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32* const rankVal = rankValOrigin[0];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ int w;
+ int const wEnd = (int)maxWeight + 1;
+
+ /* Fill DTable in order of weight. */
+ for (w = 1; w < wEnd; ++w) {
+ int const begin = (int)rankStart[w];
+ int const end = (int)rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
+
+ if (targetLog-nbBits >= minBits) {
+ /* Enough room for a second symbol. */
+ int start = rankVal[w];
+ U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
+ int minWeight = nbBits + scaleLog;
+ int s;
+ if (minWeight < 1) minWeight = 1;
+ /* Fill the DTable for every symbol of weight w.
+ * These symbols get at least 1 second symbol.
+ */
+ for (s = begin; s != end; ++s) {
+ HUF_fillDTableX2Level2(
+ DTable + start, targetLog, nbBits,
+ rankValOrigin[nbBits], minWeight, wEnd,
+ sortedList, rankStart,
+ nbBitsBaseline, sortedList[s].symbol);
+ start += length;
+ }
+ } else {
+ /* Only a single symbol. */
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedList + begin, sortedList + end,
+ nbBits, targetLog,
+ /* baseSeq */ 0, /* level */ 1);
+ }
+ }
+}
+
+typedef struct {
+ rankValCol_t rankVal[HUF_TABLELOG_MAX];
+ U32 rankStats[HUF_TABLELOG_MAX + 1];
+ U32 rankStart0[HUF_TABLELOG_MAX + 3];
+ sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
+ U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+} HUF_ReadDTableX2_Workspace;
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ U32 tableLog, maxW, nbSymbols;
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+ U32 *rankStart;
+
+ HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
+
+ if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
+
+ rankStart = wksp->rankStart0 + 1;
+ ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
+ ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
+
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+ if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;
+
+ /* find maxWeight */
+ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankStart;
+ nextRankStart += wksp->rankStats[w];
+ rankStart[w] = curr;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ rankStart[maxW+1] = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = wksp->weightList[s];
+ U32 const r = rankStart[w]++;
+ wksp->sortedSymbol[r].symbol = (BYTE)s;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = wksp->rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankVal;
+ nextRankVal += wksp->rankStats[w] << (w+rescale);
+ rankVal0[w] = curr;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = wksp->rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUF_fillDTableX2(dt, maxTableLog,
+ wksp->sortedSymbol,
+ wksp->rankStart0, wksp->rankVal, maxW,
+ tableLog+1);
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
+
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, &dt[val].sequence, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, &dt[val].sequence, 1);
+ if (dt[val].length==1) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ } else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+ }
+ }
+ return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+ const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
+ if (dtLog <= 11 && MEM_64bits()) {
+ /* up to 10 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ } else {
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ if ((size_t)(pEnd - p) >= 2) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+ }
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BIT_DStream_t bitD;
+
+ /* Init */
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - (sizeof(size_t)-1);
+ const void* const dtPtr = DTable+1;
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal = 1;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit); ) {
+#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+#else
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = (U32)LIKELY((U32)
+ (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
+#endif
+ }
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if HUF_NEED_DEFAULT_FUNCTION
+static
+size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+
+static HUF_ASM_X86_64_BMI2_ATTRS size_t
+HUF_decompress4X2_usingDTable_internal_bmi2_asm(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable) {
+ void const* dt = DTable + 1;
+ const BYTE* const iend = (const BYTE*)cSrc + 6;
+ BYTE* const oend = (BYTE*)dst + dstSize;
+ HUF_DecompressAsmArgs args;
+ {
+ size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
+ if (ret != 0)
+ return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+
+ assert(args.ip[0] >= args.ilimit);
+ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);
+
+ /* note : op4 already verified within main loop */
+ assert(args.ip[0] >= iend);
+ assert(args.ip[1] >= iend);
+ assert(args.ip[2] >= iend);
+ assert(args.ip[3] >= iend);
+ assert(args.op[3] <= oend);
+ (void)iend;
+
+ /* finish bitStreams one by one */
+ {
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd)
+ return ERROR(corruption_detected);
+ }
+ }
+
+ /* decoded size */
+ return dstSize;
+}
+#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
+
+static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+# else
+ return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+# endif
+ }
+#else
+ (void)bmi2;
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+#else
+ return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+#endif
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X1 */
+
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
+{
+    /* single, double */
+ {{0,0}, {1,1}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}}, /* Q==1 : impossible */
+ {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
+ {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
+ {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
+ {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
+ {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
+ {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
+ {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
+ {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
+ {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
+ {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
+ {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
+ {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
+ {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
+ {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
+};
+#endif
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ assert(dstSize > 0);
+ assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 1;
+#else
+ /* decoder timing evaluation */
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
+ return DTime1 < DTime0;
+ }
+#endif
+}
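+
+/* Worked example (editor's illustration, not part of the reference data above) :
+ *  dstSize = 64 KB, cSrcSize = 32 KB
+ *    Q      = 32768 * 16 / 65536 = 8
+ *    D256   = 65536 >> 8 = 256
+ *    DTime0 = 359 + 188*256 = 48487                         (algoTime[8][0], single-symbol X1)
+ *    DTime1 = 1167 + 109*256 = 29071, +29071>>5 => 29979    (algoTime[8][1], double-symbol X2)
+ *  29979 < 48487, so HUF_selectDecoder() returns 1 and the X2 decoder is selected. */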
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+#endif
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+ HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
+ }
+}
+
diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
new file mode 100644
index 000000000000..dbbc7919de53
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of the ZSTD_DDict object */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+
+
+
+/*-*******************************************************
+* Types
+*********************************************************/
+struct ZSTD_DDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictSize;
+ ZSTD_entropyDTables_t entropy;
+ U32 dictID;
+ U32 entropyPresent;
+ ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_copyDDictParameters");
+ assert(dctx != NULL);
+ assert(ddict != NULL);
+ dctx->dictID = ddict->dictID;
+ dctx->prefixStart = ddict->dictContent;
+ dctx->virtualStart = ddict->dictContent;
+ dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+ dctx->previousDstEnd = dctx->dictEnd;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ if (ddict->entropyPresent) {
+ dctx->litEntropy = 1;
+ dctx->fseEntropy = 1;
+ dctx->LLTptr = ddict->entropy.LLTable;
+ dctx->MLTptr = ddict->entropy.MLTable;
+ dctx->OFTptr = ddict->entropy.OFTable;
+ dctx->HUFptr = ddict->entropy.hufTable;
+ dctx->entropy.rep[0] = ddict->entropy.rep[0];
+ dctx->entropy.rep[1] = ddict->entropy.rep[1];
+ dctx->entropy.rep[2] = ddict->entropy.rep[2];
+ } else {
+ dctx->litEntropy = 0;
+ dctx->fseEntropy = 0;
+ }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+ ZSTD_dictContentType_e dictContentType)
+{
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+ if (ddict->dictSize < 8) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+ &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+ dictionary_corrupted, "");
+ ddict->entropyPresent = 1;
+ return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+ ddict->dictBuffer = NULL;
+ ddict->dictContent = dict;
+ if (!dict) dictSize = 0;
+ } else {
+ void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
+ ddict->dictBuffer = internalBuffer;
+ ddict->dictContent = internalBuffer;
+ if (!internalBuffer) return ERROR(memory_allocation);
+ ZSTD_memcpy(internalBuffer, dict, dictSize);
+ }
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+
+ /* parse dictionary content */
+ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
+
+ return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
+ if (ddict == NULL) return NULL;
+ ddict->cMem = customMem;
+ { size_t const initResult = ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType);
+ if (ZSTD_isError(initResult)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+ } }
+ return ddict;
+ }
+}
+
+/*! ZSTD_createDDict() :
+* Create a digested dictionary, to start decompression without startup delay.
+* `dict` content is copied inside DDict.
+* Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
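+
+/* Usage sketch (illustrative only, not part of the library) :
+ * assuming `dictBuf` / `dictLen` hold a dictionary built offline,
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictLen);
+ *   if (ddict != NULL) {
+ *       unsigned const id = ZSTD_getDictID_fromDDict(ddict);   (0 for raw-content dictionaries)
+ *       ... decompress one or more frames with this ddict ...
+ *       ZSTD_freeDDict(ddict);    dictBuf was copied, so it may be released right after creation
+ *   }
+ */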
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, to start decompression without startup delay.
+ * Dictionary content is simply referenced, it will be accessed during decompression.
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* sBuffer, size_t sBufferSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ size_t const neededSpace = sizeof(ZSTD_DDict)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+ ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+ assert(sBuffer != NULL);
+ assert(dict != NULL);
+ if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
+ if (sBufferSize < neededSpace) return NULL;
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */
+ dict = ddict+1;
+ }
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType) ))
+ return NULL;
+ return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_customFree(ddict->dictBuffer, cMem);
+ ZSTD_customFree(ddict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ * Estimate amount of memory that will be needed to create a dictionary for decompression.
+ * Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0;
+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
new file mode 100644
index 000000000000..8c1a79d666f8
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DDICT_H
+#define ZSTD_DDICT_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* ZSTD_DDict, and several public functions */
+
+
+/*-*******************************************************
+ * Interface
+ *********************************************************/
+
+/* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+#endif /* ZSTD_DDICT_H */
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
new file mode 100644
index 000000000000..6b3177c94711
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress.c
@@ -0,0 +1,2150 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTD_decompress() allocates its context,
+ * on stack (0), or into heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+* LEGACY_SUPPORT :
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+
+/*!
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
+#endif
+
+/*!
+ * NO_FORWARD_PROGRESS_MAX :
+ * maximum allowed nb of calls to ZSTD_decompressStream()
+ * without any forward progress
+ * (defined as: no byte read from input, and no byte flushed to output)
+ * before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
+#include "../common/zstd_internal.h" /* blockProperties_t */
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
+
+
+
+
+/* ***********************************
+ * Multiple DDicts Hashset internals *
+ *************************************/
+
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
+ * the load factor of the ddict hash set.
+ */
+
+#define DDICT_HASHSET_TABLE_BASE_SIZE 64
+#define DDICT_HASHSET_RESIZE_FACTOR 2
+
+/* Hash function to determine starting position of dict insertion within the table
+ * Returns an index in the range [0, hashSet->ddictPtrTableSize)
+ */
+static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ const U64 hash = xxh64(&dictID, sizeof(U32), 0);
+    /* DDict ptr table size is a power of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
+ return hash & (hashSet->ddictPtrTableSize - 1);
+}
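+/* Example (illustrative) : with the base table size of 64 the mask is 0x3F, so a hash
+ * whose low bits are 0x5A7 maps to 0x5A7 & 0x3F = 0x27 (39), always within [0, 64). */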
+
+/* Adds DDict to a hashset without resizing it.
+ * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
+ * Returns 0 if successful, or a zstd error code if something went wrong.
+ */
+static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
+ const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ while (hashSet->ddictPtrTable[idx] != NULL) {
+ /* Replace existing ddict if inserting ddict with same dictID */
+ if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
+ DEBUGLOG(4, "DictID already exists, replacing rather than adding");
+ hashSet->ddictPtrTable[idx] = ddict;
+ return 0;
+ }
+        idx = (idx + 1) & idxRangeMask;   /* advance to the next slot, wrapping to the start of the table */
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ hashSet->ddictPtrTable[idx] = ddict;
+ hashSet->ddictPtrCount++;
+ return 0;
+}
+
+/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
+ * rehashes all values, allocates new table, frees old table.
+ * Returns 0 on success, otherwise a zstd error code.
+ */
+static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
+ const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
+ const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
+ size_t oldTableSize = hashSet->ddictPtrTableSize;
+ size_t i;
+
+ DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
+ RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
+ hashSet->ddictPtrTable = newTable;
+ hashSet->ddictPtrTableSize = newTableSize;
+ hashSet->ddictPtrCount = 0;
+ for (i = 0; i < oldTableSize; ++i) {
+ if (oldTable[i] != NULL) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
+ }
+ }
+ ZSTD_customFree((void*)oldTable, customMem);
+ DEBUGLOG(4, "Finished re-hash");
+ return 0;
+}
+
+/* Fetches a DDict with the given dictID
+ * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
+ */
+static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ for (;;) {
+ size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
+ if (currDictID == dictID || currDictID == 0) {
+ /* currDictID == 0 implies a NULL ddict entry */
+ break;
+ } else {
+            idx = (idx + 1) & idxRangeMask;   /* Goes to start of table when we reach the end */
+ }
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ return hashSet->ddictPtrTable[idx];
+}
+
+/* Allocates space for and returns a ddict hash set
+ * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
+ * Returns NULL if allocation failed.
+ */
+static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
+ ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
+ DEBUGLOG(4, "Allocating new hash set");
+ if (!ret)
+ return NULL;
+ ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
+ if (!ret->ddictPtrTable) {
+ ZSTD_customFree(ret, customMem);
+ return NULL;
+ }
+ ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+ ret->ddictPtrCount = 0;
+ return ret;
+}
+
+/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
+ * Note: The ZSTD_DDict* within the table are NOT freed.
+ */
+static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Freeing ddict hash set");
+ if (hashSet && hashSet->ddictPtrTable) {
+ ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
+ }
+ if (hashSet) {
+ ZSTD_customFree(hashSet, customMem);
+ }
+}
+
+/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
+ * Returns 0 on success, or a ZSTD error.
+ */
+static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
+ if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
+ }
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
+ return 0;
+}
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+ size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
+}
+
+static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
+{
+ assert(dctx->streamStage == zdss_init);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->outBufferMode = ZSTD_bm_buffered;
+ dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
+ dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+ dctx->staticSize = 0;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->dictEnd = NULL;
+ dctx->ddictIsCold = 0;
+ dctx->dictUses = ZSTD_dont_use;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ dctx->oversizedDuration = 0;
+#if DYNAMIC_BMI2
+ dctx->bmi2 = ZSTD_cpuSupportsBmi2();
+#endif
+ dctx->ddictSet = NULL;
+ ZSTD_DCtx_resetParameters(dctx);
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentEndForFuzzing = NULL;
+#endif
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_internal(customMem);
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ DEBUGLOG(3, "ZSTD_createDCtx");
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+}
+
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ dctx->ddict = NULL;
+ dctx->dictUses = ZSTD_dont_use;
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
+ { ZSTD_customMem const cMem = dctx->customMem;
+ ZSTD_clearDict(dctx);
+ ZSTD_customFree(dctx->inBuff, cMem);
+ dctx->inBuff = NULL;
+ if (dctx->ddictSet) {
+ ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
+ dctx->ddictSet = NULL;
+ }
+ ZSTD_customFree(dctx, cMem);
+ return 0;
+ }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
+ * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
+ * accordingly sets the ddict to be used to decompress the frame.
+ *
+ * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
+ *
+ * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
+ */
+static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
+ assert(dctx->refMultipleDDicts && dctx->ddictSet);
+ DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
+ if (dctx->ddict) {
+ const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
+ if (frameDDict) {
+ DEBUGLOG(4, "DDict found!");
+ ZSTD_clearDict(dctx);
+ dctx->dictID = dctx->fParams.dictID;
+ dctx->ddict = frameDDict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ }
+}
+
+
+/*-*************************************************************
+ * Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ */
+unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
+
+/* ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
+ }
+}
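+/* Example (illustrative) : for the default ZSTD_f_zstd1 format, minInputSize is 5
+ * (4-byte magic number + 1-byte frame header descriptor). A descriptor with
+ * singleSegment == 0, dictID code == 2 and fcsId == 1 then gives
+ * 5 + 1 (window descriptor) + 2 (dictID) + 2 (frame content size) = 10 bytes,
+ * assuming the usual field-size tables ZSTD_did_fieldSize = {0,1,2,4} and
+ * ZSTD_fcs_fieldSize = {0,2,4,8}. */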
+
+/* ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/* ZSTD_getFrameHeader_advanced() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ *           >0, `srcSize` is too small, the return value is the wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+
+    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since those are two different signals */
+ if (srcSize < minInputSize) return minInputSize;
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame */
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
+ zfhPtr->frameType = ZSTD_skippableFrame;
+ return 0;
+ }
+ RETURN_ERROR(prefix_unknown, "");
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
+
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const singleSegment = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U64 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+ "reserved bits, must be zero");
+
+ if (!singleSegment) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
+ windowSize = (1ULL << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
+ }
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
+
+/* ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ *           >0, `srcSize` is too small, the return value is the wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
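+
+/* Typical caller pattern (sketch, not part of the library ; `buf` / `bufSize` are placeholders) :
+ *
+ *   ZSTD_frameHeader zfh;
+ *   size_t const r = ZSTD_getFrameHeader(&zfh, buf, bufSize);
+ *   if (ZSTD_isError(r))   { ... reject the input ... }
+ *   else if (r > 0)        { ... gather at least r input bytes, then retry ... }
+ *   else                   { ... zfh is filled : zfh.windowSize, zfh.frameContentSize, ... }
+ */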
+
+/* ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
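+
+/* Sketch of a common use (illustrative) : sizing a destination buffer up front.
+ *
+ *   unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
+ *   if (rSize == ZSTD_CONTENTSIZE_ERROR)        { ... not a valid zstd frame ... }
+ *   else if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) { ... size not recorded, use streaming instead ... }
+ *   else                                        { ... allocate rSize bytes and decompress into it ... }
+ */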
+
+static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+{
+ size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+ U32 sizeU32;
+
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
+
+ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+ RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+ frameParameter_unsupported, "");
+ {
+ size_t const skippableSize = skippableHeaderSize + sizeU32;
+ RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
+ return skippableSize;
+ }
+}
+
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
+ const void* src, size_t srcSize)
+{
+ U32 const magicNumber = MEM_readLE32(src);
+ size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
+ size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
+
+ /* check input validity */
+ RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
+ RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+
+ /* deliver payload */
+ if (skippableContentSize > 0 && dst != NULL)
+ ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
+ if (magicVariant != NULL)
+ *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
+ return skippableContentSize;
+}
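+
+/* Usage sketch (illustrative ; `payload` / `payloadCapacity` are placeholders) :
+ *
+ *   if (ZSTD_isSkippableFrame(src, srcSize)) {
+ *       unsigned variant;
+ *       size_t const n = ZSTD_readSkippableFrame(payload, payloadCapacity, &variant, src, srcSize);
+ *       if (!ZSTD_isError(n)) { ... n bytes of user data, magic was ZSTD_MAGIC_SKIPPABLE_START + variant ... }
+ *   }
+ */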
+
+/* ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
+ while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
+ U32 const magicNumber = MEM_readLE32(src);
+
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
+/* ZSTD_getDecompressedSize() :
+ * compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - frame content is empty
+ - decompressed size field is not present in frame header
+ - frame header unknown / not supported
+ - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
+
+
+/* ZSTD_decodeFrameHeader() :
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * If multiple DDict references are enabled, also will choose the correct DDict to use.
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+ size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+
+ /* Reference DDict requested by frame if dctx references multiple ddicts */
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(dctx);
+ }
+
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Skip the dictID check in fuzzing mode, because it makes the search
+ * harder.
+ */
+ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+ dictionary_wrong, "");
+#endif
+ dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
+ if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
+ dctx->processedCSize += headerSize;
+ return 0;
+}
+
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ frameSizeInfo.compressedSize = ret;
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ return frameSizeInfo;
+}
+
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+
+ if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+ frameSizeInfo.compressedSize <= srcSize);
+ return frameSizeInfo;
+ } else {
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const ipstart = ip;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ }
+
+ ip += zfh.headerSize;
+ remainingSize -= zfh.headerSize;
+
+ /* Iterate over each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize))
+ return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+ ip += ZSTD_blockHeaderSize + cBlockSize;
+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+ nbBlocks++;
+
+ if (blockProperties.lastBlock) break;
+ }
+
+ /* Final frame content checksum */
+ if (zfh.checksumFlag) {
+ if (remainingSize < 4)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ ip += 4;
+ }
+
+ frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
+ frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+ ? zfh.frameContentSize
+ : nbBlocks * zfh.blockSizeMax;
+ return frameSizeInfo;
+ }
+}
+
+/* ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
+
+/* ZSTD_decompressBound() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame or a skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ZSTD_CONTENTSIZE_ERROR;
+ assert(srcSize >= compressedSize);
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ bound += decompressedBound;
+ }
+ return bound;
+}
+
+
+/*-*************************************************************
+ * Frame decoding
+ ***************************************************************/
+
+/* ZSTD_insertBlock() :
+ * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
+ ZSTD_checkContinuity(dctx, blockStart, blockSize);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_copyRawBlock");
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (srcSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memmove(dst, src, srcSize);
+ return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+ BYTE b,
+ size_t regenSize)
+{
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (regenSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memset(dst, b, regenSize);
+ return regenSize;
+}
+
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+{
+ (void)dctx;
+ (void)uncompressedSize;
+ (void)compressedSize;
+ (void)streaming;
+}
+
+
+/*! ZSTD_decompressFrame() :
+ * @dctx must be properly initialized
+ * will update *srcPtr and *srcSizePtr,
+ * to make *srcPtr progress by one frame. */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
+{
+ const BYTE* const istart = (const BYTE*)(*srcPtr);
+ const BYTE* ip = istart;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
+ BYTE* op = ostart;
+ size_t remainingSrcSize = *srcSizePtr;
+
+ DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
+
+ /* check */
+ RETURN_ERROR_IF(
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
+ ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+ FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ BYTE* oBlockEnd = oend;
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSrcSize -= ZSTD_blockHeaderSize;
+ RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+
+ if (ip >= op && ip < oBlockEnd) {
+ /* We are decompressing in-place. Limit the output pointer so that we
+ * don't overwrite the block that we are currently reading. This will
+ * fail decompression if the input & output pointers aren't spaced
+ * far enough apart.
+ *
+ * This is important to set, even when the pointers are far enough
+ * apart, because ZSTD_decompressBlock_internal() can decide to store
+ * literals in the output buffer, after the block it is decompressing.
+ * Since we don't want anything to overwrite our input, we have to tell
+ * ZSTD_decompressBlock_internal to never write past ip.
+ *
+ * See ZSTD_allocateLiteralsBuffer() for reference.
+ */
+ oBlockEnd = op + (ip - op);
+ }
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+ break;
+ case bt_raw :
+ /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
+ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
+ break;
+ case bt_reserved :
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ if (dctx->validateChecksum)
+ xxh64_update(&dctx->xxhState, op, decodedSize);
+ if (decodedSize != 0)
+ op += decodedSize;
+ assert(ip != NULL);
+ ip += cBlockSize;
+ remainingSrcSize -= cBlockSize;
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ }
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
+ if (!dctx->forceIgnoreChecksum) {
+ U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
+ U32 checkRead;
+ checkRead = MEM_readLE32(ip);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
+ }
+ ip += 4;
+ remainingSrcSize -= 4;
+ }
+ ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
+ /* Allow caller to get size read */
+ *srcPtr = ip;
+ *srcSizePtr = remainingSrcSize;
+ return (size_t)(op-ostart);
+}
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
+ int moreThan1Frame = 0;
+
+ DEBUGLOG(5, "ZSTD_decompressMultiFrame");
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
+ dict = ZSTD_DDict_dictContent(ddict);
+ dictSize = ZSTD_DDict_dictSize(ddict);
+ }
+
+ while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
+
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ } }
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
+ }
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ RETURN_ERROR_IF(
+ (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1),
+ srcSize_wrong,
+ "At least one frame successfully completed, "
+ "but following bytes are garbage: "
+ "it's more likely to be a srcSize error, "
+ "specifying more input bytes than size of frame(s). "
+ "Note: one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic bytes. "
+ "But this is _much_ less likely than a srcSize field error.");
+ if (ZSTD_isError(res)) return res;
+ assert(res <= dstCapacity);
+ if (res != 0)
+ dst = (BYTE*)dst + res;
+ dstCapacity -= res;
+ }
+ moreThan1Frame = 1;
+ } /* while (srcSize >= ZSTD_startingInputLength(dctx->format)) */
+
+ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
+
+ return (size_t)((BYTE*)dst - (BYTE*)dststart);
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+ switch (dctx->dictUses) {
+ default:
+ assert(0 /* Impossible */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_dont_use:
+ ZSTD_clearDict(dctx);
+ return NULL;
+ case ZSTD_use_indefinitely:
+ return dctx->ddict;
+ case ZSTD_use_once:
+ dctx->dictUses = ZSTD_dont_use;
+ return dctx->ddict;
+ }
+}
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+ size_t regenSize;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+ RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
+ regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTD_DCtx dctx;
+ ZSTD_initDCtx_internal(&dctx);
+ return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
+
+/*-**************************************
+* Advanced Streaming Decompression API
+* Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+/*
+ * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
+ * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
+ * be streamed.
+ *
+ * For blocks that can be streamed, this allows us to reduce the latency until we produce
+ * output, and avoid copying the input.
+ *
+ * @param inputSize - The total amount of input that the caller currently has.
+ */
+static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
+ if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
+ return dctx->expected;
+ if (dctx->bType != bt_raw)
+ return dctx->expected;
+ return BOUNDED(1, inputSize, dctx->expected);
+}
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+ switch(dctx->stage)
+ {
+ default: /* should not happen */
+ assert(0);
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_getFrameHeaderSize:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_decodeFrameHeader:
+ return ZSTDnit_frameHeader;
+ case ZSTDds_decodeBlockHeader:
+ return ZSTDnit_blockHeader;
+ case ZSTDds_decompressBlock:
+ return ZSTDnit_block;
+ case ZSTDds_decompressLastBlock:
+ return ZSTDnit_lastBlock;
+ case ZSTDds_checkChecksum:
+ return ZSTDnit_checksum;
+ case ZSTDds_decodeSkippableHeader:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_skipFrame:
+ return ZSTDnit_skippableFrame;
+ }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/* ZSTD_decompressContinue() :
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
+ /* Sanity check */
+ RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ dctx->processedCSize += srcSize;
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
+ case ZSTDds_decodeFrameHeader:
+ assert(src != NULL);
+ ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->rleSize = bp.origSize;
+ if (cBlockSize) {
+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+ return 0;
+ }
+ /* empty block */
+ if (bp.lastBlock) {
+ if (dctx->fParams.checksumFlag) {
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* end of frame */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ }
+ return 0;
+ }
+
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_raw :
+ assert(srcSize <= dctx->expected);
+ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+ FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
+ assert(rSize == srcSize);
+ dctx->expected -= rSize;
+ break;
+ case bt_rle :
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_reserved : /* should never happen */
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+ FORWARD_IF_ERROR(rSize, "");
+ RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
+ dctx->decodedSize += rSize;
+ if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
+ dctx->previousDstEnd = (char*)dst + rSize;
+
+ /* Stay on the same stage until we are finished streaming the block. */
+ if (dctx->expected > 0) {
+ return rSize;
+ }
+
+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
+ RETURN_ERROR_IF(
+ dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && dctx->decodedSize != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0; /* ends here */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTD_blockHeaderSize;
+ }
+ return rSize;
+ }
+
+ case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
+ {
+ if (dctx->validateChecksum) {
+ U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
+ }
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
+ case ZSTDds_skipFrame:
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
+ }
+}
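+
+/* Usage sketch (illustrative only, not part of upstream zstd) : the bufferless
+ * loop expected by this API. `read_exact()` is an assumed helper that fills
+ * `inBuff` with exactly the requested number of bytes; the destination must stay
+ * contiguous for the whole frame (see ZSTD_checkContinuity()).
+ *
+ *   size_t err = ZSTD_decompressBegin(dctx);
+ *   if (ZSTD_isError(err)) return err;
+ *   { size_t toRead;
+ *     while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *         size_t produced;
+ *         read_exact(inBuff, toRead);
+ *         produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), inBuff, toRead);
+ *         if (ZSTD_isError(produced)) return produced;
+ *         op += produced;
+ *     } }
+ */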
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ return 0;
+}
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t
+ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
+ assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
+ dictPtr += 8; /* skip header = magic + dictID */
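+ /* remaining layout, parsed below in this order :
+ * [ Huffman literals table ][ FSE Offcode table ][ FSE MatchLength table ]
+ * [ FSE LitLength table ][ 3 repcodes, 4 bytes each ][ dictionary content ] */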
+
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+ ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+ { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
+ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+#ifdef HUF_FORCE_DECOMPRESS_X1
+ /* in minimal huffman, we always use X1 variants */
+ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+#else
+ size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+ dictPtr, (size_t)(dictEnd - dictPtr),
+ workspace, workspaceSize);
+#endif
+ RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->OFTable,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
+ offcodeLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */0);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->MLTable,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
+ matchlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->LLTable,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
+ litlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
+ dictionary_corrupted, "");
+ entropy->rep[i] = rep;
+ } }
+
+ return (size_t)(dictPtr - (const BYTE*)dict);
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
+ RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+ dctx->litEntropy = dctx->fseEntropy = 1;
+
+ /* reference dictionary content */
+ return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->processedCSize = 0;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->prefixStart = NULL;
+ dctx->virtualStart = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ dctx->bType = bt_reserved;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (dict && dictSize)
+ RETURN_ERROR_IF(
+ ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+ dictionary_corrupted, "");
+ return 0;
+}
+
+
+/* ====== ZSTD_DDict ====== */
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+ assert(dctx != NULL);
+ if (ddict) {
+ const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
+ size_t const dictSize = ZSTD_DDict_dictSize(ddict);
+ const void* const dictEnd = dictStart + dictSize;
+ dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
+ DEBUGLOG(4, "DDict is %s",
+ dctx->ddictIsCold ? "~cold~" : "hot!");
+ }
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (ddict) { /* NULL ddict is equivalent to no dictionary */
+ ZSTD_copyDDictParameters(dctx, ddict);
+ }
+ return 0;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ * The needed dictionary then becomes a hidden piece of information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ if (ZSTD_isError(hError)) return 0;
+ return zfp.dictID;
+}
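+
+/* Usage sketch (illustrative only, not part of upstream zstd) : picking one of
+ * several pre-digested dictionaries by matching the frame's dictID against
+ * ZSTD_getDictID_fromDDict(). `ddicts` / `nbDDicts` are assumptions of this sketch.
+ *
+ *   unsigned const wantedID = ZSTD_getDictID_fromFrame(src, srcSize);
+ *   const ZSTD_DDict* selected = NULL;
+ *   size_t i;
+ *   for (i = 0; i < nbDDicts; i++) {
+ *       if (ZSTD_getDictID_fromDDict(ddicts[i]) == wantedID) { selected = ddicts[i]; break; }
+ *   }
+ *   // wantedID == 0 means the frame does not advertise a dictID : fall back to no dictionary
+ */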
+
+
+/*! ZSTD_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Use dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict)
+{
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
+}
+
+
+/*=====================================
+* Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_internal(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (dict && dictSize != 0) {
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
+ dctx->ddict = dctx->ddictLocal;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
+ dctx->dictUses = ZSTD_use_once;
+ return 0;
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
+ return ZSTD_startingInputLength(zds->format);
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+ DEBUGLOG(4, "ZSTD_initDStream");
+ return ZSTD_initDStream_usingDDict(zds, NULL);
+}
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
+ return ZSTD_startingInputLength(dctx->format);
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
+ return ZSTD_startingInputLength(dctx->format);
+}
+
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (ddict) {
+ dctx->ddict = ddict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
+ if (dctx->ddictSet == NULL) {
+ dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
+ if (!dctx->ddictSet) {
+ RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
+ }
+ }
+ assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
+ }
+ }
+ return 0;
+}
+
+/* ZSTD_DCtx_setMaxWindowSize() :
+ * note : no direct equivalence in ZSTD_DCtx_setParameter,
+ * since this version sets windowSize, and the other sets windowLog */
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
+ size_t const min = (size_t)1 << bounds.lowerBound;
+ size_t const max = (size_t)1 << bounds.upperBound;
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
+ RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
+}
+
+ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+ case ZSTD_d_format:
+ bounds.lowerBound = (int)ZSTD_f_zstd1;
+ bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ return bounds;
+ case ZSTD_d_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+ case ZSTD_d_forceIgnoreChecksum:
+ bounds.lowerBound = (int)ZSTD_d_validateChecksum;
+ bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
+ return bounds;
+ case ZSTD_d_refMultipleDDicts:
+ bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
+ bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
+ return bounds;
+ default:;
+ }
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+}
+
+/* ZSTD_dParam_withinBounds:
+ * @return 1 if value is within dParam bounds,
+ * 0 otherwise */
+static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+#define CHECK_DBOUNDS(p,v) { \
+ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
+}
+
+size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
+{
+ switch (param) {
+ case ZSTD_d_windowLogMax:
+ *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
+ return 0;
+ case ZSTD_d_format:
+ *value = (int)dctx->format;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ *value = (int)dctx->outBufferMode;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ *value = (int)dctx->forceIgnoreChecksum;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ *value = (int)dctx->refMultipleDDicts;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
+
+size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
+ CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
+ dctx->maxWindowSize = ((size_t)1) << value;
+ return 0;
+ case ZSTD_d_format:
+ CHECK_DBOUNDS(ZSTD_d_format, value);
+ dctx->format = (ZSTD_format_e)value;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
+ dctx->outBufferMode = (ZSTD_bufferMode_e)value;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
+ dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
+ if (dctx->staticSize != 0) {
+ RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
+ }
+ dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
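+
+/* Usage sketch (illustrative only) : parameters are only accepted while the
+ * stream is in its initial stage, i.e. right after creation or ZSTD_DCtx_reset().
+ *
+ *   size_t err = ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
+ *   if (!ZSTD_isError(err)) err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
+ *   if (!ZSTD_isError(err)) err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum, ZSTD_d_ignoreChecksum);
+ */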
+
+size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ ZSTD_DCtx_resetParameters(dctx);
+ }
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/
+ unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+ frameParameter_windowTooLarge, "");
+ return minRBSize;
+}
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
+ RETURN_ERROR_IF(err>0, srcSize_wrong, "");
+ RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+ frameParameter_windowTooLarge, "");
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
+
+
+/* ***** Decompression ***** */
+
+static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
+}
+
+static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
+ zds->oversizedDuration++;
+ else
+ zds->oversizedDuration = 0;
+}
+
+static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
+{
+ return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */
+static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
+{
+ ZSTD_outBuffer const expect = zds->expectedOutBuffer;
+ /* No requirement when ZSTD_obm_stable is not enabled. */
+ if (zds->outBufferMode != ZSTD_bm_stable)
+ return 0;
+ /* Any buffer is allowed in zdss_init, this must be the same for every other call until
+ * the context is reset.
+ */
+ if (zds->streamStage == zdss_init)
+ return 0;
+ /* The buffer must match our expectation exactly. */
+ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
+ return 0;
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
+}
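+
+/* Usage note (illustrative) : with ZSTD_d_stableOutBuffer enabled, the caller
+ * promises that `output->dst` and `output->size` stay identical across every
+ * ZSTD_decompressStream() call of a session and that `output->pos` is not moved
+ * between calls; in exchange the decoder writes directly into the final buffer
+ * and skips its internal output buffer. */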
+
+/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
+ * and updates the stage and the output buffer state. This call is extracted so it can be
+ * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
+ * NOTE: You must break after calling this function since the streamStage is modified.
+ */
+static size_t ZSTD_decompressContinueStream(
+ ZSTD_DStream* zds, char** op, char* oend,
+ void const* src, size_t srcSize) {
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ if (zds->outBufferMode == ZSTD_bm_buffered) {
+ size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
+ size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ if (!decodedSize && !isSkipFrame) {
+ zds->streamStage = zdss_read;
+ } else {
+ zds->outEnd = zds->outStart + decodedSize;
+ zds->streamStage = zdss_flush;
+ }
+ } else {
+ /* Write directly into the output buffer */
+ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
+ size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ *op += decodedSize;
+ /* Flushing is not needed. */
+ zds->streamStage = zdss_read;
+ assert(*op <= oend);
+ assert(zds->outBufferMode == ZSTD_bm_stable);
+ }
+ return 0;
+}
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const char* const src = (const char*)input->src;
+ const char* const istart = input->pos != 0 ? src + input->pos : src;
+ const char* const iend = input->size != 0 ? src + input->size : src;
+ const char* ip = istart;
+ char* const dst = (char*)output->dst;
+ char* const ostart = output->pos != 0 ? dst + output->pos : dst;
+ char* const oend = output->size != 0 ? dst + output->size : dst;
+ char* op = ostart;
+ U32 someMoreWork = 1;
+
+ DEBUGLOG(5, "ZSTD_decompressStream");
+ RETURN_ERROR_IF(
+ input->pos > input->size,
+ srcSize_wrong,
+ "forbidden. in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ RETURN_ERROR_IF(
+ output->pos > output->size,
+ dstSize_tooSmall,
+ "forbidden. out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+ FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
+
+ while (someMoreWork) {
+ switch(zds->streamStage)
+ {
+ case zdss_init :
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ zds->streamStage = zdss_loadHeader;
+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+ zds->hostageByte = 0;
+ zds->expectedOutBuffer = *output;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_loadHeader :
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+ { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ if (zds->refMultipleDDicts && zds->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(zds);
+ }
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+ return hSize; /* error */
+ }
+ if (hSize != 0) { /* need more input */
+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ zds->lhSize += remainingInput;
+ }
+ input->pos = input->size;
+ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ assert(ip != NULL);
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* check for single-pass mode opportunity */
+ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ } }
+
+ /* Check output buffer is large enough for ZSTD_obm_stable. */
+ if (zds->outBufferMode == ZSTD_bm_stable
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
+ RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
+ }
+
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
+
+ if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge, "");
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
+ ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
+ : 0;
+
+ ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
+
+ { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
+ int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
+
+ if (tooSmall || tooLarge) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+ DEBUGLOG(4, "inBuff : from %u to %u",
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
+ DEBUGLOG(4, "outBuff : from %u to %u",
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+ if (zds->staticSize) { /* static DCtx */
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+ assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
+ RETURN_ERROR_IF(
+ bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+ memory_allocation, "");
+ } else {
+ ZSTD_customFree(zds->inBuff, zds->customMem);
+ zds->inBuffSize = 0;
+ zds->outBuffSize = 0;
+ zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
+ RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
+ }
+ zds->inBuffSize = neededInBuffSize;
+ zds->outBuff = zds->inBuff + zds->inBuffSize;
+ zds->outBuffSize = neededOutBuffSize;
+ } } }
+ zds->streamStage = zdss_read;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_read:
+ DEBUGLOG(5, "stage zdss_read");
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ if (neededInSize==0) { /* end of frame */
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
+ ip += neededInSize;
+ /* Function modifies the stage so we must break */
+ break;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_load:
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t loadedSize;
+ /* At this point we shouldn't be decompressing a block that we can stream. */
+ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
+ RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+ corruption_detected,
+ "should never happen");
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
+ }
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ zds->inPos = 0; /* input is consumed */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
+ /* Function modifies the stage so we must break */
+ break;
+ }
+ case zdss_flush:
+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ if (flushedSize == toFlushSize) { /* flush completed */
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
+ zds->outStart = zds->outEnd = 0;
+ }
+ break;
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
+ } }
+
+ /* result */
+ input->pos = (size_t)(ip - (const char*)(input->src));
+ output->pos = (size_t)(op - (char*)(output->dst));
+
+ /* Update the expected output buffer for ZSTD_obm_stable. */
+ zds->expectedOutBuffer = *output;
+
+ if ((ip==istart) && (op==ostart)) { /* no forward progress */
+ zds->noForwardProgress ++;
+ if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+ RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
+ RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
+ assert(0);
+ }
+ } else {
+ zds->noForwardProgress = 0;
+ }
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
+ input->pos++; /* release hostage */
+ } /* zds->hostageByte */
+ return 0;
+ } /* zds->outEnd == zds->outStart */
+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
+ zds->hostageByte=1;
+ }
+ return 1;
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+ nextSrcSizeHint -= zds->inPos; /* part already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
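+
+/* Usage sketch (illustrative only, not part of upstream zstd) : a typical
+ * ZSTD_decompressStream() loop for a single frame. `fill_input()` and
+ * `consume_output()` are assumed caller-side helpers; recommended buffer sizes
+ * come from ZSTD_DStreamInSize() and ZSTD_DStreamOutSize().
+ *
+ *   ZSTD_inBuffer in = { inBuff, 0, 0 };
+ *   ZSTD_outBuffer out = { outBuff, ZSTD_DStreamOutSize(), 0 };
+ *   size_t ret = 1;   // non-zero : decoder still expects input
+ *   while (ret != 0) {
+ *       if (in.pos == in.size) {
+ *           in.size = fill_input(inBuff, ZSTD_DStreamInSize());
+ *           in.pos = 0;
+ *           if (in.size == 0) return ERROR(srcSize_wrong);   // truncated input
+ *       }
+ *       out.pos = 0;
+ *       ret = ZSTD_decompressStream(zds, &out, &in);
+ *       if (ZSTD_isError(ret)) return ret;
+ *       consume_output(outBuff, out.pos);
+ *   }
+ */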
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
new file mode 100644
index 000000000000..c1913b8e7c89
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.c
@@ -0,0 +1,2072 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_decompress_block :
+ * this module takes care of decompressing _compressed_ block */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/compiler.h" /* prefetch */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/zstd_internal.h"
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"
+
+/*_*******************************************************
+* Macros
+**********************************************************/
+
+/* These two optional macros force the use one way or another of the two
+ * ZSTD_decompressSequences implementations. You can't force in both directions
+ * at the same time.
+ */
+#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
+#endif
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+ * Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr)
+{
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
+
+ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+ bpPtr->origSize = cSize; /* only useful for RLE */
+ if (bpPtr->blockType == bt_rle) return 1;
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
+ return cSize;
+ }
+}
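+
+/* Worked example (illustrative) : block header bytes { 0x21, 0x00, 0x00 } read as
+ * the little-endian 24-bit value 33 :
+ *   lastBlock = 33 & 1        = 1  (final block of the frame)
+ *   blockType = (33 >> 1) & 3 = 0  (bt_raw)
+ *   cSize     = 33 >> 3       = 4  (4 bytes of raw content follow)
+ */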
+
+/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */
+static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
+ const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
+{
+ if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
+ {
+ /* room for litbuffer to fit without read faulting */
+ dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_in_dst;
+ }
+ else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
+ {
+ /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
+ if (splitImmediately) {
+ /* split immediately : the last ZSTD_LITBUFFEREXTRASIZE bytes of literals will land in litExtraBuffer, the rest at the end of dst */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
+ }
+ else {
+ /* initially this will be stored entirely in dst during Huffman decoding, it will be partially shifted to litExtraBuffer after */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
+ dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
+ }
+ dctx->litBufferLocation = ZSTD_split;
+ }
+ else
+ {
+ /* fits entirely within litExtraBuffer, so no split is necessary */
+ dctx->litBuffer = dctx->litExtraBuffer;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+}
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity, const streaming_operation streaming);
+/*! ZSTD_decodeLiteralsBlock() :
+ * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
+ * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
+ * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being
+ * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
+ *
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
+ void* dst, size_t dstCapacity, const streaming_operation streaming)
+{
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ {
+ case set_repeat:
+ DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
+ ZSTD_FALLTHROUGH;
+
+ case set_compressed:
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ size_t hufSuccess;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ singleStream = !lhlCode;
+ lhSize = 3;
+ litSize = (lhc >> 4) & 0x3FF;
+ litCSize = (lhc >> 14) & 0x3FF;
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize = 4;
+ litSize = (lhc >> 4) & 0x3FFF;
+ litCSize = lhc >> 18;
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize = 5;
+ litSize = (lhc >> 4) & 0x3FFFF;
+ litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
+ break;
+ }
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
+
+ /* prefetch huffman table if cold */
+ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+ PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+ }
+
+ if (litEncType==set_repeat) {
+ if (singleStream) {
+ hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ } else {
+ hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ }
+ } else {
+ if (singleStream) {
+#if defined(HUF_FORCE_DECOMPRESS_X2)
+ hufSuccess = HUF_decompress1X_DCtx_wksp(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace));
+#else
+ hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+#endif
+ } else {
+ hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+ }
+ }
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
+ }
+
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+ return litCSize + lhSize;
+ }
+
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ break;
+ }
+
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ dctx->litBufferEnd = dctx->litPtr + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ return lhSize+litSize;
+ }
+
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
+ break;
+ }
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ RETURN_ERROR(corruption_detected, "impossible");
+ }
+ }
+}
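+
+/* Editor's illustrative sketch (not part of the zstd sources) : the compressed
+ * literals header packs, after the 2-bit block type and the 2-bit size format
+ * (lhlCode), two bit fields : the regenerated size and the compressed size.
+ * The standalone helper below re-derives the extraction performed by the
+ * switch in ZSTD_decodeLiteralsBlock() above; hufdemo_read_le32() and
+ * hufdemo_read_compressed_lit_header() are hypothetical names used only for
+ * this demonstration. */
+#include <stddef.h>
+#include <stdint.h>
+
+static uint32_t hufdemo_read_le32(const uint8_t* p)
+{
+    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
+}
+
+/* Returns the header size in bytes (3, 4 or 5) and fills litSize / litCSize. */
+static size_t hufdemo_read_compressed_lit_header(const uint8_t* istart,
+                                                 size_t* litSize, size_t* litCSize)
+{
+    uint32_t const lhlCode = (istart[0] >> 2) & 3;
+    uint32_t const lhc = hufdemo_read_le32(istart);
+    switch (lhlCode)
+    {
+    default:  /* 0 or 1 : 2-2-10-10 layout ; format 0 = single stream, 1 = 4 streams */
+        *litSize  = (lhc >> 4) & 0x3FF;
+        *litCSize = (lhc >> 14) & 0x3FF;
+        return 3;
+    case 2:   /* 2-2-14-14 layout */
+        *litSize  = (lhc >> 4) & 0x3FFF;
+        *litCSize = lhc >> 18;
+        return 4;
+    case 3:   /* 2-2-18-18 layout ; the 5th byte supplies the top bits of litCSize */
+        *litSize  = (lhc >> 4) & 0x3FFFF;
+        *litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
+        return 5;
+    }
+}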
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with following method :
+ * - start from default distributions, present in /lib/common/zstd_internal.h
+ * - generate tables normally, using ZSTD_buildFSETable()
+ * - printout the content of tables
+ * - prettify output, report below, test with fuzzer to ensure it's correct */
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 4, 0}, { 16, 0, 4, 0},
+ { 32, 0, 5, 1}, { 0, 0, 5, 3},
+ { 0, 0, 5, 4}, { 0, 0, 5, 6},
+ { 0, 0, 5, 7}, { 0, 0, 5, 9},
+ { 0, 0, 5, 10}, { 0, 0, 5, 12},
+ { 0, 0, 6, 14}, { 0, 1, 5, 16},
+ { 0, 1, 5, 20}, { 0, 1, 5, 22},
+ { 0, 2, 5, 28}, { 0, 3, 5, 32},
+ { 0, 4, 5, 48}, { 32, 6, 5, 64},
+ { 0, 7, 5, 128}, { 0, 8, 6, 256},
+ { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
+ { 32, 0, 4, 0}, { 0, 0, 4, 1},
+ { 0, 0, 5, 2}, { 32, 0, 5, 4},
+ { 0, 0, 5, 5}, { 32, 0, 5, 7},
+ { 0, 0, 5, 8}, { 32, 0, 5, 10},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 32, 1, 5, 16}, { 0, 1, 5, 18},
+ { 32, 1, 5, 22}, { 0, 2, 5, 24},
+ { 32, 3, 5, 32}, { 0, 3, 5, 40},
+ { 0, 6, 4, 64}, { 16, 6, 4, 64},
+ { 32, 7, 5, 128}, { 0, 9, 6, 512},
+ { 0, 11, 6, 2048}, { 48, 0, 4, 0},
+ { 16, 0, 4, 1}, { 32, 0, 5, 2},
+ { 32, 0, 5, 3}, { 32, 0, 5, 5},
+ { 32, 0, 5, 6}, { 32, 0, 5, 8},
+ { 32, 0, 5, 9}, { 32, 0, 5, 11},
+ { 32, 0, 5, 12}, { 0, 0, 6, 15},
+ { 32, 1, 5, 18}, { 32, 1, 5, 20},
+ { 32, 2, 5, 24}, { 32, 2, 5, 28},
+ { 32, 3, 5, 40}, { 32, 4, 5, 48},
+ { 0, 16, 6,65536}, { 0, 15, 6,32768},
+ { 0, 14, 6,16384}, { 0, 13, 6, 8192},
+}; /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 5, 0}, { 0, 6, 4, 61},
+ { 0, 9, 5, 509}, { 0, 15, 5,32765},
+ { 0, 21, 5,2097149}, { 0, 3, 5, 5},
+ { 0, 7, 4, 125}, { 0, 12, 5, 4093},
+ { 0, 18, 5,262141}, { 0, 23, 5,8388605},
+ { 0, 5, 5, 29}, { 0, 8, 4, 253},
+ { 0, 14, 5,16381}, { 0, 20, 5,1048573},
+ { 0, 2, 5, 1}, { 16, 7, 4, 125},
+ { 0, 11, 5, 2045}, { 0, 17, 5,131069},
+ { 0, 22, 5,4194301}, { 0, 4, 5, 13},
+ { 16, 8, 4, 253}, { 0, 13, 5, 8189},
+ { 0, 19, 5,524285}, { 0, 1, 5, 1},
+ { 16, 6, 4, 61}, { 0, 10, 5, 1021},
+ { 0, 16, 5,65533}, { 0, 28, 5,268435453},
+ { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
+ { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
+}; /* OF_defaultDTable */
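+/* Editor's note (illustrative) : each row above reads { nextState, nbAddBits,
+ * nbBits, baseVal }. For example { 0, 6, 4, 61} decodes to a match offset of
+ * baseVal plus 6 extra bits from the stream, i.e. an offset in [61, 124];
+ * the decoder then advances this FSE state by reading nbBits = 4 more bits
+ * and adding them to nextState, as done in ZSTD_decodeSequence() and
+ * ZSTD_updateFseStateWithDInfo() below. */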
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 6, 3}, { 0, 0, 4, 4},
+ { 32, 0, 5, 5}, { 0, 0, 5, 6},
+ { 0, 0, 5, 8}, { 0, 0, 5, 9},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 0, 0, 6, 16}, { 0, 0, 6, 19},
+ { 0, 0, 6, 22}, { 0, 0, 6, 25},
+ { 0, 0, 6, 28}, { 0, 0, 6, 31},
+ { 0, 0, 6, 34}, { 0, 1, 6, 37},
+ { 0, 1, 6, 41}, { 0, 2, 6, 47},
+ { 0, 3, 6, 59}, { 0, 4, 6, 83},
+ { 0, 7, 6, 131}, { 0, 9, 6, 515},
+ { 16, 0, 4, 4}, { 0, 0, 4, 5},
+ { 32, 0, 5, 6}, { 0, 0, 5, 7},
+ { 32, 0, 5, 9}, { 0, 0, 5, 10},
+ { 0, 0, 6, 12}, { 0, 0, 6, 15},
+ { 0, 0, 6, 18}, { 0, 0, 6, 21},
+ { 0, 0, 6, 24}, { 0, 0, 6, 27},
+ { 0, 0, 6, 30}, { 0, 0, 6, 33},
+ { 0, 1, 6, 35}, { 0, 1, 6, 39},
+ { 0, 2, 6, 43}, { 0, 3, 6, 51},
+ { 0, 4, 6, 67}, { 0, 5, 6, 99},
+ { 0, 8, 6, 259}, { 32, 0, 4, 4},
+ { 48, 0, 4, 4}, { 16, 0, 4, 5},
+ { 32, 0, 5, 7}, { 32, 0, 5, 8},
+ { 32, 0, 5, 10}, { 32, 0, 5, 11},
+ { 0, 0, 6, 14}, { 0, 0, 6, 17},
+ { 0, 0, 6, 20}, { 0, 0, 6, 23},
+ { 0, 0, 6, 26}, { 0, 0, 6, 29},
+ { 0, 0, 6, 32}, { 0, 16, 6,65539},
+ { 0, 15, 6,32771}, { 0, 14, 6,16387},
+ { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
+ { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
+}; /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits)
+{
+ void* ptr = dt;
+ ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+ ZSTD_seqSymbol* const cell = dt + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->nbBits = 0;
+ cell->nextState = 0;
+ assert(nbAddBits < 255);
+ cell->nbAdditionalBits = nbAddBits;
+ cell->baseValue = baseValue;
+}
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * cannot fail if input is valid =>
+ * all inputs are presumed validated at this stage */
+FORCE_INLINE_TEMPLATE
+void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_seqSymbol* const tableDecode = dt+1;
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+
+ U16* symbolNext = (U16*)wksp;
+ BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
+ U32 highThreshold = tableSize - 1;
+
+
+ /* Sanity Checks */
+ assert(maxSymbolValue <= MaxSeq);
+ assert(tableLog <= MaxFSELog);
+ assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
+ (void)wkspSize;
+ /* Init, lay down lowprob symbols */
+ { ZSTD_seqSymbol_header DTableH;
+ DTableH.tableLog = tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].baseValue = s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ assert(normalizedCounter[s]>=0);
+ symbolNext[s] = (U16)normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ assert(tableSize <= 512);
+ /* Specialized symbol spreading for the case when there are
+ * no low probability (-1 count) symbols. When compressing
+ * small blocks we avoid low probability symbols so that this
+ * case is hit, since header decoding speed matters more.
+ */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+ * The benefit of doing it in two stages is that we avoid the
+ * variable size inner loop, which caused lots of branch misses.
+ * Now we can run through all the positions without any branch misses.
+ * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].baseValue = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ int const n = normalizedCounter[s];
+ for (i=0; i<n; i++) {
+ tableDecode[position].baseValue = s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ {
+ U32 u;
+ for (u=0; u<tableSize; u++) {
+ U32 const symbol = tableDecode[u].baseValue;
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ assert(nbAdditionalBits[symbol] < 255);
+ tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
+ tableDecode[u].baseValue = baseValue[symbol];
+ }
+ }
+}
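+
+/* Editor's illustrative sketch (not part of the zstd sources) : the generic
+ * spreading branch above places each symbol normalizedCounter[s] times across
+ * the table, stepping by FSE_TABLESTEP(tableSize) = tableSize/2 + tableSize/8 + 3
+ * slots each time. Because the step is odd for the table sizes used here and
+ * the table size is a power of two, every slot is visited exactly once. The
+ * standalone toy below mirrors that walk without the -1 "low probability"
+ * special case; the fsedemo_* names are hypothetical. */
+#include <stdint.h>
+
+#define FSEDEMO_TABLESTEP(tableSize) (((tableSize) >> 1) + ((tableSize) >> 3) + 3)
+
+static void fsedemo_spread_symbols(uint8_t* table, uint32_t tableLog,
+                                   const uint16_t* count, uint32_t maxSymbol)
+{
+    uint32_t const tableSize = 1u << tableLog;
+    uint32_t const tableMask = tableSize - 1;
+    uint32_t const step = FSEDEMO_TABLESTEP(tableSize);
+    uint32_t position = 0;
+    uint32_t s;
+    for (s = 0; s <= maxSymbol; s++) {
+        uint32_t i;
+        for (i = 0; i < count[s]; i++) {
+            table[position] = (uint8_t)s;
+            position = (position + step) & tableMask;
+        }
+    }
+    /* position wraps back to 0 only after tableSize placements, which is what
+     * the assert(position == 0) in ZSTD_buildFSETable_body() checks. */
+}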
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+#endif
+
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+ return;
+ }
+#endif
+ (void)bmi2;
+ ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ * or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+ symbolEncodingType_e type, unsigned max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+ int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
+ int bmi2)
+{
+ switch(type)
+ {
+ case set_rle :
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
+ { U32 const symbol = *(const BYTE*)src;
+ U32 const baseline = baseValue[symbol];
+ U8 const nbBits = nbAdditionalBits[symbol];
+ ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+ }
+ *DTablePtr = DTableSpace;
+ return 1;
+ case set_basic :
+ *DTablePtr = defaultTable;
+ return 0;
+ case set_repeat:
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
+ /* prefetch FSE table if used */
+ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+ const void* const pStart = *DTablePtr;
+ size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+ PREFETCH_AREA(pStart, pSize);
+ }
+ return 0;
+ case set_compressed :
+ { unsigned tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
+ *DTablePtr = DTableSpace;
+ return headerSize;
+ }
+ default :
+ assert(0);
+ RETURN_ERROR(GENERIC, "impossible");
+ }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+ int nbSeq;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+ /* check */
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
+
+ /* SeqHead */
+ nbSeq = *ip++;
+ if (!nbSeq) {
+ *nbSeqPtr=0;
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
+ return 1;
+ }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
+ ip+=2;
+ } else {
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+
+ /* FSE table descriptors */
+ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip,
+ LL_base, LL_bits,
+ LL_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += llhSize;
+ }
+
+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip,
+ OF_base, OF_bits,
+ OF_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += ofhSize;
+ }
+
+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip,
+ ML_base, ML_bits,
+ ML_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += mlhSize;
+ }
+ }
+
+ return ip-istart;
+}
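+
+/* Editor's illustrative sketch (not part of the zstd sources) : the sequences
+ * section starts with a 1-3 byte sequence count, decoded by the branches at
+ * the top of ZSTD_decodeSeqHeaders() above. The standalone helper below
+ * mirrors that encoding without the bounds checks; seqdemo_read_nbseq() is a
+ * hypothetical name, and LONGNBSEQ is assumed to be 0x7F00 in this tree. */
+#include <stddef.h>
+#include <stdint.h>
+
+static size_t seqdemo_read_nbseq(const uint8_t* ip, int* nbSeq)
+{
+    int const byte0 = ip[0];
+    if (byte0 == 0xFF) {                /* 3-byte form : 0xFF then LE16 count */
+        *nbSeq = (ip[1] | (ip[2] << 8)) + 0x7F00 /* LONGNBSEQ */;
+        return 3;
+    }
+    if (byte0 > 0x7F) {                 /* 2-byte form */
+        *nbSeq = ((byte0 - 0x80) << 8) + ip[1];
+        return 2;
+    }
+    *nbSeq = byte0;                     /* 1-byte form ; 0 means "no sequences" */
+    return 1;
+}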
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+} seq_t;
+
+typedef struct {
+ size_t state;
+ const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ ZSTD_fseState stateLL;
+ ZSTD_fseState stateOffb;
+ ZSTD_fseState stateML;
+ size_t prevOffset[ZSTD_REP_NUM];
+} seqState_t;
+
+/*! ZSTD_overlapCopy8() :
+ * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
+ * If the offset is < 8 then the offset is spread to at least 8 bytes.
+ *
+ * Precondition: *ip <= *op
+ * Postcondition: *op - *ip >= 8
+ */
+HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
+ assert(*ip <= *op);
+ if (offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[offset];
+ (*op)[0] = (*ip)[0];
+ (*op)[1] = (*ip)[1];
+ (*op)[2] = (*ip)[2];
+ (*op)[3] = (*ip)[3];
+ *ip += dec32table[offset];
+ ZSTD_copy4(*op+4, *ip);
+ *ip -= sub2;
+ } else {
+ ZSTD_copy8(*op, *ip);
+ }
+ *ip += 8;
+ *op += 8;
+ assert(*op - *ip >= 8);
+}
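+
+/* Editor's illustrative sketch (not part of the zstd sources) : for offsets
+ * smaller than 8, ZSTD_overlapCopy8() above widens the effective offset to at
+ * least 8 with dec32table/dec64table so that later copies can proceed 8 bytes
+ * at a time. For the 8 bytes it copies, the result matches this naive
+ * byte-by-byte LZ77-style self-referential copy (e.g. offset 1 replicates a
+ * single byte); overlapdemo_naive_copy() is a hypothetical reference helper. */
+#include <stddef.h>
+#include <stdint.h>
+
+static void overlapdemo_naive_copy(uint8_t* op, size_t offset, size_t length)
+{
+    const uint8_t* ip = op - offset;   /* source starts `offset` bytes behind dst */
+    size_t i;
+    for (i = 0; i < length; i++)
+        op[i] = ip[i];                 /* forward copy : earlier output feeds later output */
+}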
+
+/*! ZSTD_safecopy() :
+ * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
+ * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
+ * This function is only called in the uncommon case where the sequence is near the end of the block. It
+ * should be fast for a single long sequence, but can be slow for several short sequences.
+ *
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
+ * The src buffer must be before the dst buffer.
+ */
+static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
+ (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
+
+ if (length < 8) {
+ /* Handle short lengths. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+ if (ovtype == ZSTD_overlap_src_before_dst) {
+ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
+ assert(length >= 8);
+ ZSTD_overlapCopy8(&op, &ip, diff);
+ length -= 8;
+ assert(op - ip >= 8);
+ assert(op <= oend);
+ }
+
+ if (oend <= oend_w) {
+ /* No risk of overwrite. */
+ ZSTD_wildcopy(op, ip, length, ovtype);
+ return;
+ }
+ if (op <= oend_w) {
+ /* Wildcopy until we get close to the end. */
+ assert(oend > oend_w);
+ ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+ ip += oend_w - op;
+ op += oend_w - op;
+ }
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_safecopyDstBeforeSrc():
+ * This version allows overlap with dst before src, or handles the non-overlap case with dst after src
+ * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
+static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ if (length < 8 || diff > -8) {
+ /* Handle short lengths, close overlaps, and dst not before src. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+
+ if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
+ ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
+ ip += oend - WILDCOPY_OVERLENGTH - op;
+ op += oend - WILDCOPY_OVERLENGTH - op;
+ }
+
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_execSequenceEnd():
+ * This version handles cases that are near the end of the output buffer. It requires
+ * more careful checks to make sure there is no overflow. By separating out these hard
+ * and unlikely cases, we can speed up the common cases.
+ *
+ * NOTE: This function needs to be fast for a single long sequence, but doesn't need
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceEnd(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart - match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+/* ZSTD_execSequenceEndSplitLitBuffer():
+ * This version is intended for use in cases where the litBuffer is still split. It is kept separate to avoid a performance impact on the good case.
+ */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer");
+ ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart - match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+ * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
+
+HINT_INLINE
+size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+ * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* Assumptions (everything else goes into ZSTD_execSequenceEndSplitLitBuffer()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ } }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
+
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+ const void* ptr = dt;
+ const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+ (U32)DStatePtr->state, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits)
+{
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
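+/* Editor's note : with the constants assumed in this tree
+ * (ZSTD_WINDOWLOG_MAX_32 == 30, STREAM_ACCUMULATOR_MIN_32 == 25) this macro
+ * evaluates to 30 - 25 = 5, which the static assert inside
+ * ZSTD_decodeSequence() below depends on. */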
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+ seq_t seq;
+ const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
+ const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
+ const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
+ seq.matchLength = mlDInfo->baseValue;
+ seq.litLength = llDInfo->baseValue;
+ { U32 const ofBase = ofDInfo->baseValue;
+ BYTE const llBits = llDInfo->nbAdditionalBits;
+ BYTE const mlBits = mlDInfo->nbAdditionalBits;
+ BYTE const ofBits = ofDInfo->nbAdditionalBits;
+ BYTE const totalBits = llBits+mlBits+ofBits;
+
+ U16 const llNext = llDInfo->nextState;
+ U16 const mlNext = mlDInfo->nextState;
+ U16 const ofNext = ofDInfo->nextState;
+ U32 const llnbBits = llDInfo->nbBits;
+ U32 const mlnbBits = mlDInfo->nbBits;
+ U32 const ofnbBits = ofDInfo->nbBits;
+ /*
+ * As gcc has better branch and block analyzers, the likely/unlikely hints
+ * are sometimes only valuable for clang, where they give around a 3-4%
+ * performance gain.
+ */
+
+ /* sequence */
+ { size_t offset;
+ #if defined(__clang__)
+ if (LIKELY(ofBits > 1)) {
+ #else
+ if (ofBits > 1) {
+ #endif
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ } else {
+ U32 const ll0 = (llDInfo->baseValue == 0);
+ if (LIKELY((ofBits == 0))) {
+ offset = seqState->prevOffset[ll0];
+ seqState->prevOffset[1] = seqState->prevOffset[!ll0];
+ seqState->prevOffset[0] = offset;
+ } else {
+ offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
+ { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } } }
+ seq.offset = offset;
+ }
+
+ #if defined(__clang__)
+ if (UNLIKELY(mlBits > 0))
+ #else
+ if (mlBits > 0)
+ #endif
+ seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
+
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ #if defined(__clang__)
+ if (UNLIKELY(llBits > 0))
+ #else
+ if (llBits > 0)
+ #endif
+ seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
+
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
+ }
+
+ return seq;
+}
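+
+/* Editor's illustrative sketch (not part of the zstd sources) : the ofBits <= 1
+ * branch above implements the repeat-offset rule of the zstd format. An offset
+ * value of 1..3 reuses a recent offset; when the literal length is 0 the
+ * indices shift by one, and the last slot becomes "repeat offset 1 minus one".
+ * The standalone helper below spells that rule out on a plain 3-entry history;
+ * repdemo_resolve_offset() is a hypothetical name. */
+#include <stddef.h>
+
+static size_t repdemo_resolve_offset(size_t rep[3], size_t offsetValue, int litLengthIsZero)
+{
+    /* offsetValue is the spec-level value 1..3 signalling a repeat offset. */
+    size_t const idx = offsetValue - 1 + (litLengthIsZero ? 1 : 0);
+    size_t offset;
+    if (idx == 0)
+        return rep[0];                 /* most recent offset ; history unchanged */
+    offset = (idx == 3) ? rep[0] - 1 : rep[idx];
+    if (offset == 0) offset = 1;       /* 0 is invalid ; mirrors `temp += !temp` above */
+    if (idx != 1) rep[2] = rep[1];     /* rotate so the resolved offset becomes most recent */
+    rep[1] = rep[0];
+    rep[0] = offset;
+    return offset;
+}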
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+{
+ size_t const windowSize = dctx->fParams.windowSize;
+ /* No dictionary used. */
+ if (dctx->dictContentEndForFuzzing == NULL) return 0;
+ /* Dictionary is our prefix. */
+ if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
+ /* Dictionary is not our ext-dict. */
+ if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
+ /* Dictionary is not within our window size. */
+ if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
+ /* Dictionary is active. */
+ return 1;
+}
+
+MEM_STATIC void ZSTD_assertValidSequence(
+ ZSTD_DCtx const* dctx,
+ BYTE const* op, BYTE const* oend,
+ seq_t const seq,
+ BYTE const* prefixStart, BYTE const* virtualStart)
+{
+#if DEBUGLEVEL >= 1
+ size_t const windowSize = dctx->fParams.windowSize;
+ size_t const sequenceSize = seq.litLength + seq.matchLength;
+ BYTE const* const oLitEnd = op + seq.litLength;
+ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ assert(op <= oend);
+ assert((size_t)(oend - op) >= sequenceSize);
+ assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
+ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+ /* Offset must be within the dictionary. */
+ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+ assert(seq.offset <= windowSize + dictSize);
+ } else {
+ /* Offset must be within our window. */
+ assert(seq.offset <= windowSize);
+ }
+#else
+ (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
+#endif
+}
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer");
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+ ZSTD_STATIC_ASSERT(
+ BIT_DStream_unfinished < BIT_DStream_completed &&
+ BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+ /* first phase : decompress sequences whose literals are still served from the dst portion of the split buffer (i.e. without overrunning litPtr) */
+ {
+ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ /* Align the decompression loop to 32 + 16 bytes.
+ *
+ * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
+ * speed swings based on the alignment of the decompression loop. This
+ * performance swing is caused by parts of the decompression loop falling
+ * out of the DSB. The entire decompression loop should fit in the DSB,
+ * when it can't we get much worse performance. You can measure if you've
+ * hit the good case or the bad case with this perf command for some
+ * compressed file test.zst:
+ *
+ * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
+ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
+ *
+ * If you see most cycles served out of the MITE you've hit the bad case.
+ * If you see most cycles served out of the DSB you've hit the good case.
+ * If it is pretty even then you may be in an okay case.
+ *
+ * This issue has been reproduced on the following CPUs:
+ * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
+ * Use Instruments->Counters to get DSB/MITE cycles.
+ * I never got performance swings, but I was able to
+ * go from the good case of mostly DSB to half of the
+ * cycles served from MITE.
+ * - Coffeelake: Intel i9-9900k
+ * - Coffeelake: Intel i7-9700k
+ *
+ * I haven't been able to reproduce the instability or DSB misses on any
+ * of the following CPUS:
+ * - Haswell
+ * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH
+ * - Skylake
+ *
+ * Alignment is done for each of the three major decompression loops:
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer
+ * - ZSTD_decompressSequences_body
+ * Alignment choices are made to minimize large swings on bad cases and influence on performance
+ * from changes external to this code, rather than to overoptimize on the current commit.
+ *
+ * If you are seeing performance instability, this script can help test.
+ * It tests on 4 commits in zstd where I saw performance change.
+ *
+ * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
+ */
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+# if __GNUC__ >= 7
+ /* good for gcc-7, gcc-9, and gcc-11 */
+ __asm__("nop");
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 4");
+# if __GNUC__ == 8 || __GNUC__ == 10
+ /* good for gcc-8 and gcc-10 */
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+# endif
+#endif
+
+ /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
+ for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
+ size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ }
+
+ /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
+ if (nbSeq > 0) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence.litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ {
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (--nbSeq)
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+ }
+ }
+
+ if (nbSeq > 0) /* there is remaining lit from extra buffer */
+ {
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ != 7
+ /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# elif __GNUC__ >= 11
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for (; ; ) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
+ {
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+ { size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body");
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+ ZSTD_STATIC_ASSERT(
+ BIT_DStream_unfinished < BIT_DStream_completed &&
+ BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ >= 7
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for ( ; ; ) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+ const BYTE* const prefixStart, const BYTE* const dictEnd)
+{
+ prefetchPos += sequence.litLength;
+ { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
+ const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+ * No consequence though : memory address is only used for prefetching, not for dereferencing */
+ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ return prefetchPos + sequence.matchLength;
+}
+
+/* This decoding function employs prefetching
+ * to reduce the latency impact of cache misses.
+ * It's generally employed when the block contains a significant portion of long-distance matches
+ * or when coupled with a "cold" dictionary */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+#define STORED_SEQS 8
+#define STORED_SEQS_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS STORED_SEQS
+ seq_t sequences[STORED_SEQS];
+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+ seqState_t seqState;
+ int seqNb;
+ size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
+
+ dctx->fseEntropy = 1;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ assert(dst != NULL);
+ assert(iend >= ip);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb] = sequence;
+ }
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
+
+ /* decompress without stomping litBuffer */
+ for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
+ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t oneSeqSize;
+
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
+ {
+ /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ else
+ {
+ /* lit buffer is wholly contained in either the first or second split, or not split at all */
+ oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ }
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
+ {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence->litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ {
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+ }
+ else
+ {
+ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+ }
+
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
+ {
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ }
+ { size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
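
For illustration only, not part of this patch: the function above pipelines decoding and execution through a small power-of-two ring buffer (STORED_SEQS entries), so that each match has had time to be prefetched by the time it is copied. A self-contained sketch of that pattern, with invented example_* names standing in for ZSTD_decodeSequence()/ZSTD_execSequence():

enum { EXAMPLE_STORED = 8 };  /* power of two, so (i & (EXAMPLE_STORED - 1)) wraps around */

static int  example_decode(int seqIndex) { return seqIndex; }  /* stand-in for ZSTD_decodeSequence() */
static void example_execute(int seq)     { (void)seq; }        /* stand-in for ZSTD_execSequence() */

static void example_pipeline(int nbSeq)
{
    int ring[EXAMPLE_STORED];
    int const advance = (nbSeq < EXAMPLE_STORED) ? nbSeq : EXAMPLE_STORED;
    int i;

    for (i = 0; i < advance; i++)
        ring[i] = example_decode(i);                    /* decode ahead; the real code also prefetches the match here */

    for (; i < nbSeq; i++) {
        example_execute(ring[(i - advance) & (EXAMPLE_STORED - 1)]);  /* execute the oldest queued sequence */
        ring[i & (EXAMPLE_STORED - 1)] = example_decode(i);           /* refill the slot that was just freed */
    }

    for (i -= advance; i < nbSeq; i++)                  /* drain the remaining queued sequences */
        example_execute(ring[i & (EXAMPLE_STORED - 1)]);
}
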
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if DYNAMIC_BMI2
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static BMI2_TARGET_ATTRIBUTE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+static BMI2_TARGET_ATTRIBUTE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+static BMI2_TARGET_ATTRIBUTE size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+#endif /* DYNAMIC_BMI2 */
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame);
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static size_t
+ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+/* ZSTD_decompressSequencesLong() :
+ * decompression function triggered when a minimum share of offsets is considered "long",
+ * i.e. out of cache.
+ * note : the definition of "long" is overloaded here, sometimes meaning "wider than the bitstream register", and sometimes "farther than the memory cache distance".
+ * This function tries to mitigate main-memory latency through the use of prefetching */
+static size_t
+ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ * compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u, total = 0;
+ DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u<max; u++) {
+ if (table[u].nbAdditionalBits > 22) total += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+
+ return total;
+}
+#endif
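
A worked example of the scaling above, for illustration only: the 2.73 % / 7.81 % figures quoted in ZSTD_decompressBlock_internal() below imply OffFSELog == 8, i.e. a maximum possible share of 1<<8 = 256. If tableLog == 6 (64 table entries) and 5 of them use more than 22 additional bits, the function returns 5 << (8 - 6) = 20, so roughly 20/256 ≈ 7.8 % of offsets are "long"; that meets the 32-bit threshold (minShare == 20) and comfortably exceeds the 64-bit one (minShare == 7), so the prefetching decoder would be selected.
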
+
+size_t
+ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * In block mode, the window size is not known, so we have to be conservative.
+ * (note: but it could be evaluated from current-lowLimit)
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
+
+ /* Decode literals section */
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+
+ /* Build Decoding Tables */
+ {
+ /* These macros control at build-time which decompressor implementation
+ * we use. If neither is defined, we do some inspection and dispatch at
+ * runtime.
+ */
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ int usePrefetchDecoder = dctx->ddictIsCold;
+#endif
+ int nbSeq;
+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ srcSize -= seqHSize;
+
+ RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if ( !usePrefetchDecoder
+ && (!frame || (dctx->fParams.windowSize > (1<<24)))
+ && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
+ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (shareLongOffsets >= minShare);
+ }
+#endif
+
+ dctx->ddictIsCold = 0;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if (usePrefetchDecoder)
+#endif
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+ /* else */
+ if (dctx->litBufferLocation == ZSTD_split)
+ return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ else
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+ }
+}
+
+
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
+{
+ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
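
An editorial observation on the arithmetic above, for clarity: after the reassignment, (const char*)dctx->prefixStart - (const char*)dctx->virtualStart equals the size of the segment that just ended, so a match offset reaching back past prefixStart can later be re-based into the old segment, whose bytes remain reachable up to dictEnd.
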
+
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
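
For illustration only, not part of this patch: a hypothetical sketch of how the block-level entry point above might be driven for two consecutive blocks. It assumes the DCtx has already been prepared for block-level decompression and that block2 was compressed immediately after block1, so it may reference matches inside block1; only ZSTD_decompressBlock() and ZSTD_isError() from this file are used, and the example_* name is invented.

static size_t example_decompress_two_blocks(ZSTD_DCtx* dctx,
                                            void* dst, size_t dstCapacity,
                                            const void* block1, size_t size1,
                                            const void* block2, size_t size2)
{
    size_t const r1 = ZSTD_decompressBlock(dctx, dst, dstCapacity, block1, size1);
    if (ZSTD_isError(r1)) return r1;
    /* the second block must land right after the first one, so that
     * ZSTD_checkContinuity() sees a contiguous destination */
    {   size_t const r2 = ZSTD_decompressBlock(dctx, (char*)dst + r1, dstCapacity - r1, block2, size2);
        if (ZSTD_isError(r2)) return r2;
        return r1 + r2;
    }
}
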
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
new file mode 100644
index 000000000000..3d2d57a5d25a
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DEC_BLOCK_H
+#define ZSTD_DEC_BLOCK_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* DCtx, and some public functions */
+#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */
+#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */
+
+
+/* === Prototypes === */
+
+/* note: prototypes already published within `zstd.h` :
+ * ZSTD_decompressBlock()
+ */
+
+/* note: prototypes already published within `zstd_internal.h` :
+ * ZSTD_getcBlockSize()
+ * ZSTD_decodeSeqHeaders()
+ */
+
+
+/* Streaming state is used to inform allocation of the literal buffer */
+typedef enum {
+ not_streaming = 0,
+ is_streaming = 1
+} streaming_operation;
+
+/* ZSTD_decompressBlock_internal() :
+ * decompress block, starting at `src`,
+ * into destination buffer `dst`.
+ * @return : decompressed block size,
+ * or an error code (which can be tested using ZSTD_isError())
+ */
+size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame, const streaming_operation streaming);
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * this function must be called with valid parameters only
+ * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
+ * in which case it cannot fail.
+ * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
+ * defined in zstd_decompress_internal.h.
+ * Internal use only.
+ */
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize,
+ int bmi2);
+
+
+#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
new file mode 100644
index 000000000000..98102edb6a83
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_internal.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* zstd_decompress_internal:
+ * objects and definitions shared within lib/decompress modules */
+
+ #ifndef ZSTD_DECOMPRESS_INTERNAL_H
+ #define ZSTD_DECOMPRESS_INTERNAL_H
+
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/mem.h" /* BYTE, U16, U32 */
+#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */
+
+
+
+/*-*******************************************************
+ * Constants
+ *********************************************************/
+static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40,
+ 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31 };
+
+static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59,
+ 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+
+/*-*******************************************************
+ * Decompression types
+ *********************************************************/
+ typedef struct {
+ U32 fastMode;
+ U32 tableLog;
+ } ZSTD_seqSymbol_header;
+
+ typedef struct {
+ U16 nextState;
+ BYTE nbAdditionalBits;
+ BYTE nbBits;
+ U32 baseValue;
+ } ZSTD_seqSymbol;
+
+ #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
+
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
+
+typedef struct {
+ ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
+ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
+ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ U32 rep[ZSTD_REP_NUM];
+ U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+} ZSTD_entropyDTables_t;
+
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+typedef enum {
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
+ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
+/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
+typedef struct {
+ const ZSTD_DDict** ddictPtrTable;
+ size_t ddictPtrTableSize;
+ size_t ddictPtrCount;
+} ZSTD_DDictHashSet;
+
+#ifndef ZSTD_DECODER_INTERNAL_BUFFER
+# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16)
+#endif
+
+#define ZSTD_LBMIN 64
+#define ZSTD_LBMAX (128 << 10)
+
+/* extra buffer, compensates when dst is not large enough to store litBuffer */
+#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
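
For reference (editorial, not part of this patch), assuming BOUNDED() is the usual clamp macro from zstd_internal.h: it bounds its middle argument between the outer two, so with the defaults above ZSTD_LITBUFFEREXTRASIZE = max(64, min(1<<16, 128<<10)) = 1<<16, i.e. a 64 KiB scratch literal buffer (plus WILDCOPY_OVERLENGTH slack where it is declared in ZSTD_DCtx_s below).
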
+
+typedef enum {
+ ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */
+ ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */
+ ZSTD_split = 2 /* Split between litExtraBuffer and dst */
+} ZSTD_litLocation_e;
+
+struct ZSTD_DCtx_s
+{
+ const ZSTD_seqSymbol* LLTptr;
+ const ZSTD_seqSymbol* MLTptr;
+ const ZSTD_seqSymbol* OFTptr;
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyDTables_t entropy;
+ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
+ const void* previousDstEnd; /* detect continuity */
+ const void* prefixStart; /* start of current segment */
+ const void* virtualStart; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameHeader fParams;
+ U64 processedCSize;
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), stores the blockType between the block-header decoding and block-decompression stages */
+ ZSTD_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ struct xxh64_state xxhState;
+ size_t headerSize;
+ ZSTD_format_e format;
+ ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
+ U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
+ const BYTE* litPtr;
+ ZSTD_customMem customMem;
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
+#if DYNAMIC_BMI2 != 0
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+#endif
+
+ /* dictionary */
+ ZSTD_DDict* ddictLocal;
+ const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+ U32 dictID;
+ int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+ ZSTD_dictUses_e dictUses;
+ ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
+ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+
+ /* streaming */
+ ZSTD_dStreamStage streamStage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ size_t maxWindowSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t lhSize;
+ U32 hostageByte;
+ int noForwardProgress;
+ ZSTD_bufferMode_e outBufferMode;
+ ZSTD_outBuffer expectedOutBuffer;
+
+ /* workspace */
+ BYTE* litBuffer;
+ const BYTE* litBufferEnd;
+ ZSTD_litLocation_e litBufferLocation;
+ BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */
+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+
+ size_t oversizedDuration;
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ void const* dictContentBeginForFuzzing;
+ void const* dictContentEndForFuzzing;
+#endif
+
+ /* Tracing */
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
+#if DYNAMIC_BMI2 != 0
+ return dctx->bmi2;
+#else
+ (void)dctx;
+ return 0;
+#endif
+}
+
+/*-*******************************************************
+ * Shared internal functions
+ *********************************************************/
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
+size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize);
+
+/*! ZSTD_checkContinuity() :
+ * check if next `dst` follows previous position, where decompression ended.
+ * If yes, do nothing (continue on current segment).
+ * If not, classify previous segment as "external dictionary", and start a new segment.
+ * This function cannot fail. */
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
+
+
+#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
new file mode 100644
index 000000000000..a06ca187aab5
--- /dev/null
+++ b/lib/zstd/decompress_sources.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file includes every .c file needed for decompression.
+ * It is used by lib/decompress_unzstd.c to include the decompression
+ * source into the translation-unit, so it can be used for kernel
+ * decompression.
+ */
+
+/*
+ * Disable the ASM Huffman implementation because we need to
+ * include all the sources.
+ */
+#define ZSTD_DISABLE_ASM 1
+
+#include "common/debug.c"
+#include "common/entropy_common.c"
+#include "common/error_private.c"
+#include "common/fse_decompress.c"
+#include "common/zstd_common.c"
+#include "decompress/huf_decompress.c"
+#include "decompress/zstd_ddict.c"
+#include "decompress/zstd_decompress.c"
+#include "decompress/zstd_decompress_block.c"
+#include "zstd_decompress_module.c"
diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
deleted file mode 100644
index 2b0a643c32c4..000000000000
--- a/lib/zstd/entropy_common.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Common functions of New Generation Entropy library
- * Copyright (C) 2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* *************************************
-* Dependencies
-***************************************/
-#include "error_private.h" /* ERR_*, ERROR */
-#include "fse.h"
-#include "huf.h"
-#include "mem.h"
-
-/*=== Version ===*/
-unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
-
-/*=== Error Management ===*/
-unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-
-unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-
-/*-**************************************************************
-* FSE NCount encoding-decoding
-****************************************************************/
-size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
-{
- const BYTE *const istart = (const BYTE *)headerBuffer;
- const BYTE *const iend = istart + hbSize;
- const BYTE *ip = istart;
- int nbBits;
- int remaining;
- int threshold;
- U32 bitStream;
- int bitCount;
- unsigned charnum = 0;
- int previous0 = 0;
-
- if (hbSize < 4)
- return ERROR(srcSize_wrong);
- bitStream = ZSTD_readLE32(ip);
- nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
- if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
- return ERROR(tableLog_tooLarge);
- bitStream >>= 4;
- bitCount = 4;
- *tableLogPtr = nbBits;
- remaining = (1 << nbBits) + 1;
- threshold = 1 << nbBits;
- nbBits++;
-
- while ((remaining > 1) & (charnum <= *maxSVPtr)) {
- if (previous0) {
- unsigned n0 = charnum;
- while ((bitStream & 0xFFFF) == 0xFFFF) {
- n0 += 24;
- if (ip < iend - 5) {
- ip += 2;
- bitStream = ZSTD_readLE32(ip) >> bitCount;
- } else {
- bitStream >>= 16;
- bitCount += 16;
- }
- }
- while ((bitStream & 3) == 3) {
- n0 += 3;
- bitStream >>= 2;
- bitCount += 2;
- }
- n0 += bitStream & 3;
- bitCount += 2;
- if (n0 > *maxSVPtr)
- return ERROR(maxSymbolValue_tooSmall);
- while (charnum < n0)
- normalizedCounter[charnum++] = 0;
- if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
- ip += bitCount >> 3;
- bitCount &= 7;
- bitStream = ZSTD_readLE32(ip) >> bitCount;
- } else {
- bitStream >>= 2;
- }
- }
- {
- int const max = (2 * threshold - 1) - remaining;
- int count;
-
- if ((bitStream & (threshold - 1)) < (U32)max) {
- count = bitStream & (threshold - 1);
- bitCount += nbBits - 1;
- } else {
- count = bitStream & (2 * threshold - 1);
- if (count >= threshold)
- count -= max;
- bitCount += nbBits;
- }
-
- count--; /* extra accuracy */
- remaining -= count < 0 ? -count : count; /* -1 means +1 */
- normalizedCounter[charnum++] = (short)count;
- previous0 = !count;
- while (remaining < threshold) {
- nbBits--;
- threshold >>= 1;
- }
-
- if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
- ip += bitCount >> 3;
- bitCount &= 7;
- } else {
- bitCount -= (int)(8 * (iend - 4 - ip));
- ip = iend - 4;
- }
- bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
- }
- } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
- if (remaining != 1)
- return ERROR(corruption_detected);
- if (bitCount > 32)
- return ERROR(corruption_detected);
- *maxSVPtr = charnum - 1;
-
- ip += (bitCount + 7) >> 3;
- return ip - istart;
-}
-
-/*! HUF_readStats() :
- Read compact Huffman tree, saved by HUF_writeCTable().
- `huffWeight` is destination buffer.
- `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
- @return : size read from `src` , or an error Code .
- Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
-*/
-size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 weightTotal;
- const BYTE *ip = (const BYTE *)src;
- size_t iSize;
- size_t oSize;
-
- if (!srcSize)
- return ERROR(srcSize_wrong);
- iSize = ip[0];
- /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */
-
- if (iSize >= 128) { /* special header */
- oSize = iSize - 127;
- iSize = ((oSize + 1) / 2);
- if (iSize + 1 > srcSize)
- return ERROR(srcSize_wrong);
- if (oSize >= hwSize)
- return ERROR(corruption_detected);
- ip += 1;
- {
- U32 n;
- for (n = 0; n < oSize; n += 2) {
- huffWeight[n] = ip[n / 2] >> 4;
- huffWeight[n + 1] = ip[n / 2] & 15;
- }
- }
- } else { /* header compressed with FSE (normal case) */
- if (iSize + 1 > srcSize)
- return ERROR(srcSize_wrong);
- oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
- if (FSE_isError(oSize))
- return oSize;
- }
-
- /* collect weight stats */
- memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
- weightTotal = 0;
- {
- U32 n;
- for (n = 0; n < oSize; n++) {
- if (huffWeight[n] >= HUF_TABLELOG_MAX)
- return ERROR(corruption_detected);
- rankStats[huffWeight[n]]++;
- weightTotal += (1 << huffWeight[n]) >> 1;
- }
- }
- if (weightTotal == 0)
- return ERROR(corruption_detected);
-
- /* get last non-null symbol weight (implied, total must be 2^n) */
- {
- U32 const tableLog = BIT_highbit32(weightTotal) + 1;
- if (tableLog > HUF_TABLELOG_MAX)
- return ERROR(corruption_detected);
- *tableLogPtr = tableLog;
- /* determine last weight */
- {
- U32 const total = 1 << tableLog;
- U32 const rest = total - weightTotal;
- U32 const verif = 1 << BIT_highbit32(rest);
- U32 const lastWeight = BIT_highbit32(rest) + 1;
- if (verif != rest)
- return ERROR(corruption_detected); /* last value must be a clean power of 2 */
- huffWeight[oSize] = (BYTE)lastWeight;
- rankStats[lastWeight]++;
- }
- }
-
- /* check tree construction validity */
- if ((rankStats[1] < 2) || (rankStats[1] & 1))
- return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
-
- /* results */
- *nbSymbolsPtr = (U32)(oSize + 1);
- return iSize + 1;
-}
diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
deleted file mode 100644
index 1a60b31f706c..000000000000
--- a/lib/zstd/error_private.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* Note : this module is expected to remain private, do not expose it */
-
-#ifndef ERROR_H_MODULE
-#define ERROR_H_MODULE
-
-/* ****************************************
-* Dependencies
-******************************************/
-#include <linux/types.h> /* size_t */
-#include <linux/zstd.h> /* enum list */
-
-/* ****************************************
-* Compiler-specific
-******************************************/
-#define ERR_STATIC static __attribute__((unused))
-
-/*-****************************************
-* Customization (error_public.h)
-******************************************/
-typedef ZSTD_ErrorCode ERR_enum;
-#define PREFIX(name) ZSTD_error_##name
-
-/*-****************************************
-* Error codes handling
-******************************************/
-#define ERROR(name) ((size_t)-PREFIX(name))
-
-ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
-
-ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
-{
- if (!ERR_isError(code))
- return (ERR_enum)0;
- return (ERR_enum)(0 - code);
-}
-
-#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
deleted file mode 100644
index 7460ab04b191..000000000000
--- a/lib/zstd/fse.h
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * FSE : Finite State Entropy codec
- * Public Prototypes declaration
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef FSE_H
-#define FSE_H
-
-/*-*****************************************
-* Dependencies
-******************************************/
-#include <linux/types.h> /* size_t, ptrdiff_t */
-
-/*-*****************************************
-* FSE_PUBLIC_API : control library symbols visibility
-******************************************/
-#define FSE_PUBLIC_API
-
-/*------ Version ------*/
-#define FSE_VERSION_MAJOR 0
-#define FSE_VERSION_MINOR 9
-#define FSE_VERSION_RELEASE 0
-
-#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
-#define FSE_QUOTE(str) #str
-#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
-#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
-
-#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
-FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
-
-/*-*****************************************
-* Tool functions
-******************************************/
-FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
-
-/* Error Management */
-FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
-
-/*-*****************************************
-* FSE detailed API
-******************************************/
-/*!
-FSE_compress() does the following:
-1. count symbol occurrence from source[] into table count[]
-2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
-3. save normalized counters to memory buffer using writeNCount()
-4. build encoding table 'CTable' from normalized counters
-5. encode the data stream using encoding table 'CTable'
-
-FSE_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-/* *** COMPRESSION *** */
-/*! FSE_optimalTableLog():
- dynamically downsize 'tableLog' when conditions are met.
- It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
- @return : recommended tableLog (necessarily <= 'maxTableLog') */
-FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_normalizeCount():
- normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
- 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
- @return : tableLog,
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_NCountWriteBound():
- Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
- Typically useful for allocation purpose. */
-FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_writeNCount():
- Compactly save 'normalizedCounter' into 'buffer'.
- @return : size of the compressed table,
- or an errorCode, which can be tested using FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! Constructor and Destructor of FSE_CTable.
- Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
-typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-
-/*! FSE_compress_usingCTable():
- Compress `src` using `ct` into `dst` which must be already allocated.
- @return : size of compressed data (<= `dstCapacity`),
- or 0 if compressed data could not fit into `dst`,
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
-
-/*!
-Tutorial :
-----------
-The first step is to count all symbols. FSE_count() does this job very fast.
-Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
-'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
-maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
-FSE_count() will return the number of occurrence of the most frequent symbol.
-This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-The next step is to normalize the frequencies.
-FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
-It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
-You can use 'tableLog'==0 to mean "use default tableLog value".
-If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
-which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
-
-The result of FSE_normalizeCount() will be saved into a table,
-called 'normalizedCounter', which is a table of signed short.
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
-The return value is tableLog if everything proceeded as expected.
-It is 0 if there is a single symbol within distribution.
-If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
-'buffer' must be already allocated.
-For guaranteed success, buffer size must be at least FSE_headerBound().
-The result of the function is the number of bytes written into 'buffer'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
-
-'normalizedCounter' can then be used to create the compression table 'CTable'.
-The space required by 'CTable' must be already allocated, using FSE_createCTable().
-You can then use FSE_buildCTable() to fill 'CTable'.
-If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
-
-'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
-Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
-The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
-If it returns '0', compressed data could not fit into 'dst'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-*/
-
-/* *** DECOMPRESSION *** */
-
-/*! FSE_readNCount():
- Read compactly saved 'normalizedCounter' from 'rBuffer'.
- @return : size read from 'rBuffer',
- or an errorCode, which can be tested using FSE_isError().
- maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
-
-/*! Constructor and Destructor of FSE_DTable.
- Note that its size depends on 'tableLog' */
-typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
-
-/*! FSE_buildDTable():
- Builds 'dt', which must be already allocated, using FSE_createDTable().
- return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
-
-/*! FSE_decompress_usingDTable():
- Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
- into `dst` which must be already allocated.
- @return : size of regenerated data (necessarily <= `dstCapacity`),
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If block is uncompressed, use memcpy() instead
- If block is a single repeated byte, use memset() instead )
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
-This is performed by the function FSE_buildDTable().
-The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
-`cSrcSize` must be strictly correct, otherwise decompression will fail.
-FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
-If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
-*/
-
-/* *** Dependency *** */
-#include "bitstream.h"
-
-/* *****************************************
-* Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size >> 7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
-
-/* *****************************************
-* FSE advanced API
-*******************************************/
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned
- */
-size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` must be a table of minimum `1024` unsigned
- */
-size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
-
-/*! FSE_count_simple
- * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
- * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
-*/
-size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
-/**< same as FSE_optimalTableLog(), which used `minus==2` */
-
-size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
-/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
-
-size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
-/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be >= `(1<<tableLog)`.
- */
-size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
-
-size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
-/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
-
-size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
-/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
-/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
-
-/* *****************************************
-* FSE symbol compression API
-*******************************************/
-/*!
- This API consists of small unitary functions, which highly benefit from being inlined.
- Hence their body are included in next section.
-*/
-typedef struct {
- ptrdiff_t value;
- const void *stateTable;
- const void *symbolTT;
- unsigned stateLog;
-} FSE_CState_t;
-
-static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
-
-static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
-
-static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
-
-/**<
-These functions are inner components of FSE_compress_usingCTable().
-They allow the creation of custom streams, mixing multiple tables and bit sources.
-
-A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
-So the first symbol you will encode is the last you will decode, like a LIFO stack.
-
-You will need a few variables to track your CStream. They are :
-
-FSE_CTable ct; // Provided by FSE_buildCTable()
-BIT_CStream_t bitStream; // bitStream tracking structure
-FSE_CState_t state; // State tracking structure (can have several)
-
-
-The first thing to do is to init bitStream and state.
- size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
- FSE_initCState(&state, ct);
-
-Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
-You can then encode your input data, byte after byte.
-FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
-Remember decoding will be done in reverse direction.
- FSE_encodeByte(&bitStream, &state, symbol);
-
-At any time, you can also add any bit sequence.
-Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
- BIT_addBits(&bitStream, bitField, nbBits);
-
-The above methods don't commit data to memory, they just store it into local register, for speed.
-Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-Writing data to memory is a manual operation, performed by the flushBits function.
- BIT_flushBits(&bitStream);
-
-Your last FSE encoding operation shall be to flush your last state value(s).
- FSE_flushState(&bitStream, &state);
-
-Finally, you must close the bitStream.
-The function returns the size of CStream in bytes.
-If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
-If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
- size_t size = BIT_closeCStream(&bitStream);
-*/
-
-/* *****************************************
-* FSE symbol decompression API
-*******************************************/
-typedef struct {
- size_t state;
- const void *table; /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
-
-/**<
-Let's now decompose FSE_decompress_usingDTable() into its unitary components.
-You will decode FSE-encoded symbols from the bitStream,
-and also any other bitFields you put in, **in reverse order**.
-
-You will need a few variables to track your bitStream. They are :
-
-BIT_DStream_t DStream; // Stream context
-FSE_DState_t DState; // State context. Multiple ones are possible
-FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
-
-The first thing to do is to init the bitStream.
- errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
-
-You should then retrieve your initial state(s)
-(in reverse flushing order if you have several ones) :
- errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
-
-You can then decode your data, symbol after symbol.
-For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
-Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
- unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
-
-You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
-Note : maximum allowed nbBits is 25, for 32-bits compatibility
- size_t bitField = BIT_readBits(&DStream, nbBits);
-
-All above operations only read from local register (which size depends on size_t).
-Refueling the register from memory is manually performed by the reload method.
- endSignal = FSE_reloadDStream(&DStream);
-
-BIT_reloadDStream() result tells if there is still more data to read from DStream.
-BIT_DStream_unfinished : there is still some data left in the DStream.
-BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
-BIT_DStream_completed : DStream reached its exact end, generally corresponding to the end of decompression.
-BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
-
-When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
-to properly detect the exact end of stream.
-After each decoded symbol, check if DStream is fully consumed using this simple test :
- BIT_reloadDStream(&DStream) >= BIT_DStream_completed
-
-When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
-Checking if DStream has reached its end is performed by :
- BIT_endOfDStream(&DStream);
-Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
- FSE_endOfDState(&DState);
-*/
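-
-/* Illustrative sketch (not part of the original documentation) :
-a minimal single-state decoding loop following the steps above, assuming `dt` was
-built with FSE_buildDTable_wksp(), `originalSize` is known, and `dst` can hold it
-(these names are placeholders).
-
-    BIT_DStream_t DStream;
-    FSE_DState_t DState;
-    BYTE *op = (BYTE *)dst;
-    size_t errorCode = BIT_initDStream(&DStream, cSrc, cSrcSize);
-    if (FSE_isError(errorCode))
-        return errorCode;
-    FSE_initDState(&DState, &DStream, dt);
-    while (op < (BYTE *)dst + originalSize) {
-        *op++ = FSE_decodeSymbol(&DState, &DStream);  // symbols come out in reverse encoding order
-        BIT_reloadDStream(&DStream);                  // refill the local register from memory
-    }
-    // proper termination : bitStream fully consumed, state back to its initial value
-    if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState))
-        return ERROR(GENERIC);                        // corrupted input
-    return originalSize;
-*/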
-
-/* *****************************************
-* FSE unsafe API
-*******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-/* *****************************************
-* Implementation of inlined functions
-*******************************************/
-typedef struct {
- int deltaFindState;
- U32 deltaNbBits;
-} FSE_symbolCompressionTransform; /* total 8 bytes */
-
-ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
-{
- const void *ptr = ct;
- const U16 *u16ptr = (const U16 *)ptr;
- const U32 tableLog = ZSTD_read16(ptr);
- statePtr->value = (ptrdiff_t)1 << tableLog;
- statePtr->stateTable = u16ptr + 2;
- statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
- statePtr->stateLog = tableLog;
-}
-
-/*! FSE_initCState2() :
-* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
-* uses the smallest state value possible, saving the cost of this symbol */
-ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
-{
- FSE_initCState(statePtr, ct);
- {
- const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
- const U16 *stateTable = (const U16 *)(statePtr->stateTable);
- U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
- statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
- statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
- }
-}
-
-ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
-{
- const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
- const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
- U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
- BIT_addBits(bitC, statePtr->value, nbBitsOut);
- statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
-}
-
-ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
-{
- BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
- BIT_flushBits(bitC);
-}
-
-/* ====== Decompression ====== */
-
-typedef struct {
- U16 tableLog;
- U16 fastMode;
-} FSE_DTableHeader; /* sizeof U32 */
-
-typedef struct {
- unsigned short newState;
- unsigned char symbol;
- unsigned char nbBits;
-} FSE_decode_t; /* size == U32 */
-
-ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
-{
- const void *ptr = dt;
- const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
- DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
- BIT_reloadDStream(bitD);
- DStatePtr->table = dt + 1;
-}
-
-ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- return DInfo.symbol;
-}
-
-ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- size_t const lowBits = BIT_readBits(bitD, nbBits);
- DStatePtr->state = DInfo.newState + lowBits;
-}
-
-ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- BYTE const symbol = DInfo.symbol;
- size_t const lowBits = BIT_readBits(bitD, nbBits);
-
- DStatePtr->state = DInfo.newState + lowBits;
- return symbol;
-}
-
-/*! FSE_decodeSymbolFast() :
- unsafe, only works if no symbol has a probability > 50% */
-ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- BYTE const symbol = DInfo.symbol;
- size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
-
- DStatePtr->state = DInfo.newState + lowBits;
- return symbol;
-}
-
-ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
-
-/* **************************************************************
-* Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-* Increasing memory usage improves compression ratio
-* Reduced memory usage can improve speed, due to cache effect
-* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#ifndef FSE_MAX_MEMORY_USAGE
-#define FSE_MAX_MEMORY_USAGE 14
-#endif
-#ifndef FSE_DEFAULT_MEMORY_USAGE
-#define FSE_DEFAULT_MEMORY_USAGE 13
-#endif
-
-/*!FSE_MAX_SYMBOL_VALUE :
-* Maximum symbol value authorized.
-* Required for proper stack allocation */
-#ifndef FSE_MAX_SYMBOL_VALUE
-#define FSE_MAX_SYMBOL_VALUE 255
-#endif
-
-/* **************************************************************
-* template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-#define FSE_DECODE_TYPE FSE_decode_t
-
-/* ***************************************************************
-* Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
-#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
-
-#endif /* FSE_H */
diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
deleted file mode 100644
index ef3d1741d532..000000000000
--- a/lib/zstd/fse_compress.c
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * FSE : Finite State Entropy encoder
- * Copyright (C) 2013-2015, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) \
- { \
- enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/* **************************************************************
-* Templates
-****************************************************************/
-/*
- designed to be included
- for type-specific functions (template emulation in C)
- Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X, Y) X##Y
-#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
-#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
-
-/* Function templates */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * wkspSize should be sized to handle the worst-case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
- * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
- */
-size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
-{
- U32 const tableSize = 1 << tableLog;
- U32 const tableMask = tableSize - 1;
- void *const ptr = ct;
- U16 *const tableU16 = ((U16 *)ptr) + 2;
- void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
- FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
- U32 const step = FSE_TABLESTEP(tableSize);
- U32 highThreshold = tableSize - 1;
-
- U32 *cumul;
- FSE_FUNCTION_TYPE *tableSymbol;
- size_t spaceUsed32 = 0;
-
- cumul = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
- tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* CTable header */
- tableU16[-2] = (U16)tableLog;
- tableU16[-1] = (U16)maxSymbolValue;
-
- /* For explanations on how to distribute symbol values over the table :
- * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
-
- /* symbol start positions */
- {
- U32 u;
- cumul[0] = 0;
- for (u = 1; u <= maxSymbolValue + 1; u++) {
- if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
- cumul[u] = cumul[u - 1] + 1;
- tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
- } else {
- cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
- }
- }
- cumul[maxSymbolValue + 1] = tableSize + 1;
- }
-
- /* Spread symbols */
- {
- U32 position = 0;
- U32 symbol;
- for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
- int nbOccurences;
- for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
- tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
- position = (position + step) & tableMask;
- while (position > highThreshold)
- position = (position + step) & tableMask; /* Low proba area */
- }
- }
-
- if (position != 0)
- return ERROR(GENERIC); /* Must have gone through all positions */
- }
-
- /* Build table */
- {
- U32 u;
- for (u = 0; u < tableSize; u++) {
- FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
- tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
- }
- }
-
- /* Build Symbol Transformation Table */
- {
- unsigned total = 0;
- unsigned s;
- for (s = 0; s <= maxSymbolValue; s++) {
- switch (normalizedCounter[s]) {
- case 0: break;
-
- case -1:
- case 1:
- symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
- symbolTT[s].deltaFindState = total - 1;
- total++;
- break;
- default: {
- U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
- U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
- symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
- symbolTT[s].deltaFindState = total - normalizedCounter[s];
- total += normalizedCounter[s];
- }
- }
- }
- }
-
- return 0;
-}
-
-/*-**************************************************************
-* FSE NCount encoding-decoding
-****************************************************************/
-size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
-{
- size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
- return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
-}
-
-static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
- unsigned writeIsSafe)
-{
- BYTE *const ostart = (BYTE *)header;
- BYTE *out = ostart;
- BYTE *const oend = ostart + headerBufferSize;
- int nbBits;
- const int tableSize = 1 << tableLog;
- int remaining;
- int threshold;
- U32 bitStream;
- int bitCount;
- unsigned charnum = 0;
- int previous0 = 0;
-
- bitStream = 0;
- bitCount = 0;
- /* Table Size */
- bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
- bitCount += 4;
-
- /* Init */
- remaining = tableSize + 1; /* +1 for extra accuracy */
- threshold = tableSize;
- nbBits = tableLog + 1;
-
- while (remaining > 1) { /* stops at 1 */
- if (previous0) {
- unsigned start = charnum;
- while (!normalizedCounter[charnum])
- charnum++;
- while (charnum >= start + 24) {
- start += 24;
- bitStream += 0xFFFFU << bitCount;
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- }
- while (charnum >= start + 3) {
- start += 3;
- bitStream += 3 << bitCount;
- bitCount += 2;
- }
- bitStream += (charnum - start) << bitCount;
- bitCount += 2;
- if (bitCount > 16) {
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- }
- }
- {
- int count = normalizedCounter[charnum++];
- int const max = (2 * threshold - 1) - remaining;
- remaining -= count < 0 ? -count : count;
- count++; /* +1 for extra accuracy */
- if (count >= threshold)
- count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
- bitStream += count << bitCount;
- bitCount += nbBits;
- bitCount -= (count < max);
- previous0 = (count == 1);
- if (remaining < 1)
- return ERROR(GENERIC);
- while (remaining < threshold)
- nbBits--, threshold >>= 1;
- }
- if (bitCount > 16) {
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- }
- }
-
- /* flush remaining bitStream */
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += (bitCount + 7) / 8;
-
- if (charnum > maxSymbolValue + 1)
- return ERROR(GENERIC);
-
- return (out - ostart);
-}
-
-size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge); /* Unsupported */
- if (tableLog < FSE_MIN_TABLELOG)
- return ERROR(GENERIC); /* Unsupported */
-
- if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
-
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
-}
-
-/*-**************************************************************
-* Counting histogram
-****************************************************************/
-/*! FSE_count_simple
- This function counts byte values within `src`, and stores the histogram into table `count`.
- It doesn't use any additional memory.
- But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
- For this reason, prefer using a table `count` with 256 elements.
- @return : count of most numerous element
-*/
-size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
-{
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const end = ip + srcSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max = 0;
-
- memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
- if (srcSize == 0) {
- *maxSymbolValuePtr = 0;
- return 0;
- }
-
- while (ip < end)
- count[*ip++]++;
-
- while (!count[maxSymbolValue])
- maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
-
- {
- U32 s;
- for (s = 0; s <= maxSymbolValue; s++)
- if (count[s] > max)
- max = count[s];
- }
-
- return (size_t)max;
-}
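-
-/* Illustrative usage sketch (not in the original source) :
-    unsigned count[256];                    // 256 entries, so any byte value fits
-    unsigned maxSymbolValue = 255;
-    size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, src, srcSize);
-    // maxCount == srcSize would mean a single-symbol input (RLE special case)
-*/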
-
-/* FSE_count_parallel_wksp() :
- * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
- * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
-static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
- unsigned *const workSpace)
-{
- const BYTE *ip = (const BYTE *)source;
- const BYTE *const iend = ip + sourceSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max = 0;
- U32 *const Counting1 = workSpace;
- U32 *const Counting2 = Counting1 + 256;
- U32 *const Counting3 = Counting2 + 256;
- U32 *const Counting4 = Counting3 + 256;
-
- memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
-
- /* safety checks */
- if (!sourceSize) {
- memset(count, 0, maxSymbolValue + 1);
- *maxSymbolValuePtr = 0;
- return 0;
- }
- if (!maxSymbolValue)
- maxSymbolValue = 255; /* 0 == default */
-
- /* by stripes of 16 bytes */
- {
- U32 cached = ZSTD_read32(ip);
- ip += 4;
- while (ip < iend - 15) {
- U32 c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- }
- ip -= 4;
- }
-
- /* finish last symbols */
- while (ip < iend)
- Counting1[*ip++]++;
-
- if (checkMax) { /* verify stats will fit into destination table */
- U32 s;
- for (s = 255; s > maxSymbolValue; s--) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s])
- return ERROR(maxSymbolValue_tooSmall);
- }
- }
-
- {
- U32 s;
- for (s = 0; s <= maxSymbolValue; s++) {
- count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
- if (count[s] > max)
- max = count[s];
- }
- }
-
- while (!count[maxSymbolValue])
- maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
- return (size_t)max;
-}
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` size must be a table of >= `1024` unsigned */
-size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
-{
- if (sourceSize < 1500)
- return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
-}
-
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be a table of >= `1024` unsigned */
-size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
-{
- if (*maxSymbolValuePtr < 255)
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
- *maxSymbolValuePtr = 255;
- return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
-}
-
-/*-**************************************************************
-* FSE Compression Code
-****************************************************************/
-/*! FSE_sizeof_CTable() :
- FSE_CTable is a variable size structure which contains :
- `U16 tableLog;`
- `U16 maxSymbolValue;`
- `U16 nextStateNumber[1 << tableLog];` // This size is variable
- `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable
-Allocation is manual (C standard does not support variable-size structures).
-*/
-size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
-{
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge);
- return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
-}
-
-/* provides the minimum logSize to safely represent a distribution */
-static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
-{
- U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
- U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
- U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
- return minBits;
-}
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
-{
- U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
- U32 tableLog = maxTableLog;
- U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
- if (tableLog == 0)
- tableLog = FSE_DEFAULT_TABLELOG;
- if (maxBitsSrc < tableLog)
- tableLog = maxBitsSrc; /* Accuracy can be reduced */
- if (minBits > tableLog)
- tableLog = minBits; /* Need a minimum to safely represent all symbol values */
- if (tableLog < FSE_MIN_TABLELOG)
- tableLog = FSE_MIN_TABLELOG;
- if (tableLog > FSE_MAX_TABLELOG)
- tableLog = FSE_MAX_TABLELOG;
- return tableLog;
-}
-
-unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
-}
-
-/* Secondary normalization method.
- To be used when primary method fails. */
-
-static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
-{
- short const NOT_YET_ASSIGNED = -2;
- U32 s;
- U32 distributed = 0;
- U32 ToDistribute;
-
- /* Init */
- U32 const lowThreshold = (U32)(total >> tableLog);
- U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
-
- for (s = 0; s <= maxSymbolValue; s++) {
- if (count[s] == 0) {
- norm[s] = 0;
- continue;
- }
- if (count[s] <= lowThreshold) {
- norm[s] = -1;
- distributed++;
- total -= count[s];
- continue;
- }
- if (count[s] <= lowOne) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- }
-
- norm[s] = NOT_YET_ASSIGNED;
- }
- ToDistribute = (1 << tableLog) - distributed;
-
- if ((total / ToDistribute) > lowOne) {
- /* risk of rounding to zero */
- lowOne = (U32)((total * 3) / (ToDistribute * 2));
- for (s = 0; s <= maxSymbolValue; s++) {
- if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- }
- }
- ToDistribute = (1 << tableLog) - distributed;
- }
-
- if (distributed == maxSymbolValue + 1) {
- /* all values are pretty poor;
- probably incompressible data (should have already been detected);
- find max, then give all remaining points to max */
- U32 maxV = 0, maxC = 0;
- for (s = 0; s <= maxSymbolValue; s++)
- if (count[s] > maxC)
- maxV = s, maxC = count[s];
- norm[maxV] += (short)ToDistribute;
- return 0;
- }
-
- if (total == 0) {
- /* all of the symbols were low enough for the lowOne or lowThreshold */
- for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
- if (norm[s] > 0)
- ToDistribute--, norm[s]++;
- return 0;
- }
-
- {
- U64 const vStepLog = 62 - tableLog;
- U64 const mid = (1ULL << (vStepLog - 1)) - 1;
- U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
- U64 tmpTotal = mid;
- for (s = 0; s <= maxSymbolValue; s++) {
- if (norm[s] == NOT_YET_ASSIGNED) {
- U64 const end = tmpTotal + (count[s] * rStep);
- U32 const sStart = (U32)(tmpTotal >> vStepLog);
- U32 const sEnd = (U32)(end >> vStepLog);
- U32 const weight = sEnd - sStart;
- if (weight < 1)
- return ERROR(GENERIC);
- norm[s] = (short)weight;
- tmpTotal = end;
- }
- }
- }
-
- return 0;
-}
-
-size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
-{
- /* Sanity checks */
- if (tableLog == 0)
- tableLog = FSE_DEFAULT_TABLELOG;
- if (tableLog < FSE_MIN_TABLELOG)
- return ERROR(GENERIC); /* Unsupported size */
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge); /* Unsupported size */
- if (tableLog < FSE_minTableLog(total, maxSymbolValue))
- return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
-
- {
- U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
- U64 const scale = 62 - tableLog;
- U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
- U64 const vStep = 1ULL << (scale - 20);
- int stillToDistribute = 1 << tableLog;
- unsigned s;
- unsigned largest = 0;
- short largestP = 0;
- U32 lowThreshold = (U32)(total >> tableLog);
-
- for (s = 0; s <= maxSymbolValue; s++) {
- if (count[s] == total)
- return 0; /* rle special case */
- if (count[s] == 0) {
- normalizedCounter[s] = 0;
- continue;
- }
- if (count[s] <= lowThreshold) {
- normalizedCounter[s] = -1;
- stillToDistribute--;
- } else {
- short proba = (short)((count[s] * step) >> scale);
- if (proba < 8) {
- U64 restToBeat = vStep * rtbTable[proba];
- proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
- }
- if (proba > largestP)
- largestP = proba, largest = s;
- normalizedCounter[s] = proba;
- stillToDistribute -= proba;
- }
- }
- if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
- /* corner case, need another normalization method */
- size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
- if (FSE_isError(errorCode))
- return errorCode;
- } else
- normalizedCounter[largest] += (short)stillToDistribute;
- }
-
- return tableLog;
-}
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
-{
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSymbolValue = tableMask;
- void *const ptr = ct;
- U16 *const tableU16 = ((U16 *)ptr) + 2;
- void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
- FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1)
- return ERROR(GENERIC); /* min size */
-
- /* header */
- tableU16[-2] = (U16)nbBits;
- tableU16[-1] = (U16)maxSymbolValue;
-
- /* Build table */
- for (s = 0; s < tableSize; s++)
- tableU16[s] = (U16)(tableSize + s);
-
- /* Build Symbol Transformation Table */
- {
- const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
- for (s = 0; s <= maxSymbolValue; s++) {
- symbolTT[s].deltaNbBits = deltaNbBits;
- symbolTT[s].deltaFindState = s - 1;
- }
- }
-
- return 0;
-}
-
-/* fake FSE_CTable, for rle input (always same symbol) */
-size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
-{
- void *ptr = ct;
- U16 *tableU16 = ((U16 *)ptr) + 2;
- void *FSCTptr = (U32 *)ptr + 2;
- FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
-
- /* header */
- tableU16[-2] = (U16)0;
- tableU16[-1] = (U16)symbolValue;
-
- /* Build table */
- tableU16[0] = 0;
- tableU16[1] = 0; /* just in case */
-
- /* Build Symbol Transformation Table */
- symbolTT[symbolValue].deltaNbBits = 0;
- symbolTT[symbolValue].deltaFindState = 0;
-
- return 0;
-}
-
-static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
-{
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *const iend = istart + srcSize;
- const BYTE *ip = iend;
-
- BIT_CStream_t bitC;
- FSE_CState_t CState1, CState2;
-
- /* init */
- if (srcSize <= 2)
- return 0;
- {
- size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
- if (FSE_isError(initError))
- return 0; /* not enough space available to write a bitstream */
- }
-
-#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
-
- if (srcSize & 1) {
- FSE_initCState2(&CState1, ct, *--ip);
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- } else {
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_initCState2(&CState1, ct, *--ip);
- }
-
- /* join to mod 4 */
- srcSize -= 2;
- if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- }
-
- /* 2 or 4 encoding per loop */
- while (ip > istart) {
-
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
-
- if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
- FSE_FLUSHBITS(&bitC);
-
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
-
- if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- }
-
- FSE_FLUSHBITS(&bitC);
- }
-
- FSE_flushCState(&bitC, &CState2);
- FSE_flushCState(&bitC, &CState1);
- return BIT_closeCStream(&bitC);
-}
-
-size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
-{
- unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
-
- if (fast)
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
- else
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
-}
-
-size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
deleted file mode 100644
index a84300e5a013..000000000000
--- a/lib/zstd/fse_decompress.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * FSE : Finite State Entropy decoder
- * Copyright (C) 2013-2015, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) \
- { \
- enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/* check and forward error code */
-#define CHECK_F(f) \
- { \
- size_t const e = f; \
- if (FSE_isError(e)) \
- return e; \
- }
-
-/* **************************************************************
-* Templates
-****************************************************************/
-/*
- designed to be included
- for type-specific functions (template emulation in C)
- Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X, Y) X##Y
-#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
-#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
-
-/* Function templates */
-
-size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
-{
- void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
- FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
- U16 *symbolNext = (U16 *)workspace;
-
- U32 const maxSV1 = maxSymbolValue + 1;
- U32 const tableSize = 1 << tableLog;
- U32 highThreshold = tableSize - 1;
-
- /* Sanity Checks */
- if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
- return ERROR(tableLog_tooLarge);
- if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
- return ERROR(maxSymbolValue_tooLarge);
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge);
-
- /* Init, lay down lowprob symbols */
- {
- FSE_DTableHeader DTableH;
- DTableH.tableLog = (U16)tableLog;
- DTableH.fastMode = 1;
- {
- S16 const largeLimit = (S16)(1 << (tableLog - 1));
- U32 s;
- for (s = 0; s < maxSV1; s++) {
- if (normalizedCounter[s] == -1) {
- tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
- symbolNext[s] = 1;
- } else {
- if (normalizedCounter[s] >= largeLimit)
- DTableH.fastMode = 0;
- symbolNext[s] = normalizedCounter[s];
- }
- }
- }
- memcpy(dt, &DTableH, sizeof(DTableH));
- }
-
- /* Spread symbols */
- {
- U32 const tableMask = tableSize - 1;
- U32 const step = FSE_TABLESTEP(tableSize);
- U32 s, position = 0;
- for (s = 0; s < maxSV1; s++) {
- int i;
- for (i = 0; i < normalizedCounter[s]; i++) {
- tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
- position = (position + step) & tableMask;
- while (position > highThreshold)
- position = (position + step) & tableMask; /* lowprob area */
- }
- }
- if (position != 0)
- return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
- }
-
- /* Build Decoding table */
- {
- U32 u;
- for (u = 0; u < tableSize; u++) {
- FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
- U16 nextState = symbolNext[symbol]++;
- tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
- tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
- }
- }
-
- return 0;
-}
-
-/*-*******************************************************
-* Decompression (Byte symbols)
-*********************************************************/
-size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
-{
- void *ptr = dt;
- FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
- void *dPtr = dt + 1;
- FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
-
- DTableH->tableLog = 0;
- DTableH->fastMode = 0;
-
- cell->newState = 0;
- cell->symbol = symbolValue;
- cell->nbBits = 0;
-
- return 0;
-}
-
-size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
-{
- void *ptr = dt;
- FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
- void *dPtr = dt + 1;
- FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSV1 = tableMask + 1;
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1)
- return ERROR(GENERIC); /* min size */
-
- /* Build Decoding Table */
- DTableH->tableLog = (U16)nbBits;
- DTableH->fastMode = 1;
- for (s = 0; s < maxSV1; s++) {
- dinfo[s].newState = 0;
- dinfo[s].symbol = (BYTE)s;
- dinfo[s].nbBits = (BYTE)nbBits;
- }
-
- return 0;
-}
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
- const unsigned fast)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- BYTE *const omax = op + maxDstSize;
- BYTE *const olimit = omax - 3;
-
- BIT_DStream_t bitD;
- FSE_DState_t state1;
- FSE_DState_t state2;
-
- /* Init */
- CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
-
- FSE_initDState(&state1, &bitD, dt);
- FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
- /* 4 symbols per loop */
- for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
- op[0] = FSE_GETSYMBOL(&state1);
-
- if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- BIT_reloadDStream(&bitD);
-
- op[1] = FSE_GETSYMBOL(&state2);
-
- if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- {
- if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
- op += 2;
- break;
- }
- }
-
- op[2] = FSE_GETSYMBOL(&state1);
-
- if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- BIT_reloadDStream(&bitD);
-
- op[3] = FSE_GETSYMBOL(&state2);
- }
-
- /* tail */
- /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; Ends at exactly BIT_DStream_completed */
- while (1) {
- if (op > (omax - 2))
- return ERROR(dstSize_tooSmall);
- *op++ = FSE_GETSYMBOL(&state1);
- if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
- *op++ = FSE_GETSYMBOL(&state2);
- break;
- }
-
- if (op > (omax - 2))
- return ERROR(dstSize_tooSmall);
- *op++ = FSE_GETSYMBOL(&state2);
- if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
- *op++ = FSE_GETSYMBOL(&state1);
- break;
- }
- }
-
- return op - ostart;
-}
-
-size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
-{
- const void *ptr = dt;
- const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
- const U32 fastMode = DTableH->fastMode;
-
- /* select fast mode (static) */
- if (fastMode)
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
-{
- const BYTE *const istart = (const BYTE *)cSrc;
- const BYTE *ip = istart;
- unsigned tableLog;
- unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
- size_t NCountLength;
-
- FSE_DTable *dt;
- short *counting;
- size_t spaceUsed32 = 0;
-
- FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
-
- dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
- counting = (short *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* normal FSE decoding mode */
- NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
- if (FSE_isError(NCountLength))
- return NCountLength;
- // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining
- // case : NCountLength==cSrcSize */
- if (tableLog > maxLog)
- return ERROR(tableLog_tooLarge);
- ip += NCountLength;
- cSrcSize -= NCountLength;
-
- CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
-
- return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
-}
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
deleted file mode 100644
index 2143da28d952..000000000000
--- a/lib/zstd/huf.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Huffman coder, part of New Generation Entropy library
- * header file
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef HUF_H_298734234
-#define HUF_H_298734234
-
-/* *** Dependencies *** */
-#include <linux/types.h> /* size_t */
-
-/* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
-size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
-
-/* Error Management */
-unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
-
-/* *** Advanced function *** */
-
-/** HUF_compress4X_wksp() :
-* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
-size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
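-
-/* Illustrative usage sketch (not in the original header) :
-* a single call through the workspace-based entry point. HUF_COMPRESS_WORKSPACE_SIZE_U32
-* and HUF_TABLELOG_DEFAULT are defined further below in this header; buffer names are placeholders.
-*    U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
-*    size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
-*                                             255, HUF_TABLELOG_DEFAULT, wksp, sizeof(wksp));
-*    if (HUF_isError(cSize)) { }      // error
-*    if (cSize == 0) { }              // not compressible : store src raw instead
-*/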
-
-/* *** Dependencies *** */
-#include "mem.h" /* U32 */
-
-/* *** Constants *** */
-#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
-#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
-#define HUF_SYMBOLVALUE_MAX 255
-
-#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
-#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
-#error "HUF_TABLELOG_MAX is too large !"
-#endif
-
-/* ****************************************
-* Static allocation
-******************************************/
-/* HUF buffer bounds */
-#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
-#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
-
-/* static allocation of HUF's Compression Table */
-#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
- U32 name##hb[maxSymbolValue + 1]; \
- void *name##hv = &(name##hb); \
- HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
-
-/* static allocation of HUF's DTable */
-typedef U32 HUF_DTable;
-#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
-
-/* The workspace must have alignment at least 4 and be at least this large */
-#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
-#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
-/* The workspace must have alignment at least 4 and be at least this large */
-#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
-#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
-/* ****************************************
-* Advanced decompression functions
-******************************************/
-size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< single-symbol decoder */
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< double-symbols decoder */
-
-/* ****************************************
-* HUF detailed API
-******************************************/
-/*!
-HUF_compress() does the following:
-1. count symbol occurrences from source[] into table count[] using FSE_count()
-2. (optional) refine tableLog using HUF_optimalTableLog()
-3. build Huffman table from count using HUF_buildCTable()
-4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
-5. encode the data stream using HUF_compress4X_usingCTable()
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and regenerate 'CTable' using external methods.
-*/
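-
-/*
-Illustrative sketch (not in the original header) : the detailed path above, spelled out
-with the workspace-based variants available here. Buffer names (`count`, `countWksp`,
-`wksp`, `dst`...) and their sizing are placeholders left to the caller;
-FSE_count_wksp() is declared in "fse.h".
-
-    unsigned maxSymbolValue = 255;
-    FSE_count_wksp(count, &maxSymbolValue, src, srcSize, countWksp);              // 1. histogram
-    unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue); // 2. refine tableLog
-    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
-    size_t const maxBits = HUF_buildCTable_wksp(hufTable, count, maxSymbolValue, huffLog, wksp, wkspSize); // 3. (returns bits actually used)
-    size_t const hSize = HUF_writeCTable_wksp(dst, dstSize, hufTable, maxSymbolValue, (unsigned)maxBits, wksp, wkspSize); // 4. save table
-    size_t const cSize = HUF_compress4X_usingCTable((BYTE *)dst + hSize, dstSize - hSize, src, srcSize, hufTable);        // 5. encode stream
-    // each returned size_t should be tested with HUF_isError() / FSE_isError(); checks omitted for brevity
-*/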
-/* FSE_count() : find it within "fse.h" */
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
-size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
-size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
-
-typedef enum {
- HUF_repeat_none, /**< Cannot use the previous table */
- HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
- 4}X_repeat */
- HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
-} HUF_repeat;
-/** HUF_compress4X_repeat() :
-* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
-* If it uses hufTable it does not modify hufTable or repeat.
-* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-* If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
- int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
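-
-/* Illustrative usage sketch (not in the original header) :
-* reusing a table across consecutive blocks; buffer names are placeholders.
-*    HUF_repeat repeat = HUF_repeat_none;     // no previous table yet for the first block
-*    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
-*    size_t const cSize = HUF_compress4X_repeat(dst, dstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT,
-*                                               wksp, wkspSize, hufTable, &repeat, 0);
-*    // for later blocks, set repeat = HUF_repeat_check so hufTable may be reused when still valid
-*/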
-
-/** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
- */
-size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
-
-/*! HUF_readStats() :
- Read compact Huffman tree, saved by HUF_writeCTable().
- `huffWeight` is destination buffer.
- @return : size read from `src`, or an error code.
- Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
-size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
- void *workspace, size_t workspaceSize);
-
-/** HUF_readCTable() :
-* Loading a CTable saved with HUF_writeCTable() */
-size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-
-/*
-HUF_decompress() does the following:
-1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
-2. build Huffman table from the saved representation, using HUF_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
-*/
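-
-/*
-Illustrative sketch (not in the original header) : the three steps above with the
-workspace-based readDTable variants. It assumes `dctx` is a HUF_DTable declared with
-HUF_CREATE_STATIC_DTABLEX4(), and `wksp` a HUF_DECOMPRESS_WORKSPACE_SIZE byte buffer.
-
-    U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);                                     // 1. pick decoder
-    size_t const hSize = algo ? HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, wksp, wkspSize)
-                              : HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, wksp, wkspSize);   // 2. build table
-    if (HUF_isError(hSize)) return hSize;
-    return HUF_decompress4X_usingDTable(dst, dstSize, (const BYTE *)cSrc + hSize,
-                                        cSrcSize - hSize, dctx);                               // 3. decode segments
-*/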
-
-/** HUF_selectDecoder() :
-* Tells which decoder is likely to decode faster,
-* based on a set of pre-determined metrics.
-* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
-
-size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-
-size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-/* single stream variants */
-
-size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
-/** HUF_compress1X_repeat() :
-* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
-* If it uses hufTable it does not modify hufTable or repeat.
-* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-* If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
- int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< single-symbol decoder */
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< double-symbols decoder */
-
-size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
- const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
-size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-#endif /* HUF_H_298734234 */
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
deleted file mode 100644
index e727812d12aa..000000000000
--- a/lib/zstd/huf_compress.c
+++ /dev/null
@@ -1,772 +0,0 @@
-/*
- * Huffman encoder, part of New Generation Entropy library
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h" /* header compression */
-#include "huf.h"
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) \
- { \
- enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-#define CHECK_V_F(e, f) \
- size_t const e = f; \
- if (ERR_isError(e)) \
- return f
-#define CHECK_F(f) \
- { \
- CHECK_V_F(_var_err__, f); \
- }
-
-/* **************************************************************
-* Utils
-****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
-}
-
-/* *******************************************************
-* HUF : Huffman block compression
-*********************************************************/
-/* HUF_compressWeights() :
- * Same as FSE_compress(), but dedicated to huff0's weights compression.
- * The use case needs much less stack memory.
- * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
- */
-#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- BYTE *const oend = ostart + dstSize;
-
- U32 maxSymbolValue = HUF_TABLELOG_MAX;
- U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-
- FSE_CTable *CTable;
- U32 *count;
- S16 *norm;
- size_t spaceUsed32 = 0;
-
- HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
-
- CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
- count = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 1;
- norm = (S16 *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* init conditions */
- if (wtSize <= 1)
- return 0; /* Not compressible */
-
- /* Scan input and build symbol stats */
- {
- CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
- if (maxCount == wtSize)
- return 1; /* only a single symbol in src : rle */
- if (maxCount == 1)
- return 0; /* each symbol present maximum once => not compressible */
- }
-
- tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
-
- /* Write table description header */
- {
- CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
- op += hSize;
- }
-
- /* Compress */
- CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
- {
- CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
- if (cSize == 0)
- return 0; /* not enough space for compressed data */
- op += cSize;
- }
-
- return op - ostart;
-}
-
-struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
-}; /* typedef'd to HUF_CElt within "huf.h" */
-
-/*! HUF_writeCTable_wksp() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
-size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
-{
- BYTE *op = (BYTE *)dst;
- U32 n;
-
- BYTE *bitsToWeight;
- BYTE *huffWeight;
- size_t spaceUsed32 = 0;
-
- bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* check conditions */
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
-
- /* convert to weight */
- bitsToWeight[0] = 0;
- for (n = 1; n < huffLog + 1; n++)
- bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
- for (n = 0; n < maxSymbolValue; n++)
- huffWeight[n] = bitsToWeight[CTable[n].nbBits];
-
- /* attempt weights compression by FSE */
- {
- CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
- if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
- op[0] = (BYTE)hSize;
- return hSize + 1;
- }
- }
-
- /* write raw values as 4-bits (max : 15) */
- if (maxSymbolValue > (256 - 128))
- return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
- if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
- return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
- op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
- huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
- for (n = 0; n < maxSymbolValue; n += 2)
- op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
- return ((maxSymbolValue + 1) / 2) + 1;
-}
-
-size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 *rankVal;
- BYTE *huffWeight;
- U32 tableLog = 0;
- U32 nbSymbols = 0;
- size_t readSize;
- size_t spaceUsed32 = 0;
-
- rankVal = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* get symbol weights */
- readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (ERR_isError(readSize))
- return readSize;
-
- /* check result */
- if (tableLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
- if (nbSymbols > maxSymbolValue + 1)
- return ERROR(maxSymbolValue_tooSmall);
-
- /* Prepare base value per rank */
- {
- U32 n, nextRankStart = 0;
- for (n = 1; n <= tableLog; n++) {
- U32 curr = nextRankStart;
- nextRankStart += (rankVal[n] << (n - 1));
- rankVal[n] = curr;
- }
- }
-
- /* fill nbBits */
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++) {
- const U32 w = huffWeight[n];
- CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
- }
- }
-
- /* fill val */
- {
- U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
- U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++)
- nbPerRank[CTable[n].nbBits]++;
- }
-		/* determine starting value per rank */
- valPerRank[tableLog + 1] = 0; /* for w==0 */
- {
- U16 min = 0;
- U32 n;
- for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- }
- }
- /* assign value within rank, symbol order */
- {
- U32 n;
- for (n = 0; n <= maxSymbolValue; n++)
- CTable[n].val = valPerRank[CTable[n].nbBits]++;
- }
- }
-
- return readSize;
-}
-
-typedef struct nodeElt_s {
- U32 count;
- U16 parent;
- BYTE byte;
- BYTE nbBits;
-} nodeElt;
-
-static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
-{
- const U32 largestBits = huffNode[lastNonNull].nbBits;
- if (largestBits <= maxNbBits)
- return largestBits; /* early exit : no elt > maxNbBits */
-
- /* there are several too large elements (at least >= 2) */
- {
- int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
- U32 n = lastNonNull;
-
- while (huffNode[n].nbBits > maxNbBits) {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
- n--;
- } /* n stops at huffNode[n].nbBits <= maxNbBits */
- while (huffNode[n].nbBits == maxNbBits)
-			n--; /* n ends at index of smallest symbol using < maxNbBits */
-
- /* renorm totalCost */
- totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
-
- /* repay normalized cost */
- {
- U32 const noSymbol = 0xF0F0F0F0;
- U32 rankLast[HUF_TABLELOG_MAX + 2];
- int pos;
-
- /* Get pos of last (smallest) symbol per rank */
- memset(rankLast, 0xF0, sizeof(rankLast));
- {
- U32 currNbBits = maxNbBits;
- for (pos = n; pos >= 0; pos--) {
- if (huffNode[pos].nbBits >= currNbBits)
- continue;
- currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
- rankLast[maxNbBits - currNbBits] = pos;
- }
- }
-
- while (totalCost > 0) {
- U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
- for (; nBitsToDecrease > 1; nBitsToDecrease--) {
- U32 highPos = rankLast[nBitsToDecrease];
- U32 lowPos = rankLast[nBitsToDecrease - 1];
- if (highPos == noSymbol)
- continue;
- if (lowPos == noSymbol)
- break;
- {
- U32 const highTotal = huffNode[highPos].count;
- U32 const lowTotal = 2 * huffNode[lowPos].count;
- if (highTotal <= lowTotal)
- break;
- }
- }
- /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
- /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
- while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
- nBitsToDecrease++;
- totalCost -= 1 << (nBitsToDecrease - 1);
- if (rankLast[nBitsToDecrease - 1] == noSymbol)
- rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
- huffNode[rankLast[nBitsToDecrease]].nbBits++;
- if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
- rankLast[nBitsToDecrease] = noSymbol;
- else {
- rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
- rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
- }
- } /* while (totalCost > 0) */
-
-			while (totalCost < 0) { /* Sometimes, cost correction overshoots */
- if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
- (using maxNbBits) */
- while (huffNode[n].nbBits == maxNbBits)
- n--;
- huffNode[n + 1].nbBits--;
- rankLast[1] = n + 1;
- totalCost++;
- continue;
- }
- huffNode[rankLast[1] + 1].nbBits--;
- rankLast[1]++;
- totalCost++;
- }
- }
- } /* there are several too large elements (at least >= 2) */
-
- return maxNbBits;
-}
-
-typedef struct {
- U32 base;
- U32 curr;
-} rankPos;
-
-static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
-{
- rankPos rank[32];
- U32 n;
-
- memset(rank, 0, sizeof(rank));
- for (n = 0; n <= maxSymbolValue; n++) {
- U32 r = BIT_highbit32(count[n] + 1);
- rank[r].base++;
- }
- for (n = 30; n > 0; n--)
- rank[n - 1].base += rank[n].base;
- for (n = 0; n < 32; n++)
- rank[n].curr = rank[n].base;
- for (n = 0; n <= maxSymbolValue; n++) {
- U32 const c = count[n];
- U32 const r = BIT_highbit32(c + 1) + 1;
- U32 pos = rank[r].curr++;
- while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
- huffNode[pos] = huffNode[pos - 1], pos--;
- huffNode[pos].count = c;
- huffNode[pos].byte = (BYTE)n;
- }
-}
-
-/** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
- */
-#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
-typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
-size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
-{
- nodeElt *const huffNode0 = (nodeElt *)workSpace;
- nodeElt *const huffNode = huffNode0 + 1;
- U32 n, nonNullRank;
- int lowS, lowN;
- U16 nodeNb = STARTNODE;
- U32 nodeRoot;
-
- /* safety checks */
- if (wkspSize < sizeof(huffNodeTable))
- return ERROR(GENERIC); /* workSpace is not large enough */
- if (maxNbBits == 0)
- maxNbBits = HUF_TABLELOG_DEFAULT;
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(GENERIC);
- memset(huffNode0, 0, sizeof(huffNodeTable));
-
- /* sort, decreasing order */
- HUF_sort(huffNode, count, maxSymbolValue);
-
- /* init for parents */
- nonNullRank = maxSymbolValue;
- while (huffNode[nonNullRank].count == 0)
- nonNullRank--;
- lowS = nonNullRank;
- nodeRoot = nodeNb + lowS - 1;
- lowN = nodeNb;
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
- huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
- nodeNb++;
- lowS -= 2;
- for (n = nodeNb; n <= nodeRoot; n++)
- huffNode[n].count = (U32)(1U << 30);
- huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
-
- /* create parents */
- while (nodeNb <= nodeRoot) {
- U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
- huffNode[n1].parent = huffNode[n2].parent = nodeNb;
- nodeNb++;
- }
-
- /* distribute weights (unlimited tree height) */
- huffNode[nodeRoot].nbBits = 0;
- for (n = nodeRoot - 1; n >= STARTNODE; n--)
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
- for (n = 0; n <= nonNullRank; n++)
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
-
- /* enforce maxTableLog */
- maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
-
- /* fill result into tree (val, nbBits) */
- {
- U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
- U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
- if (maxNbBits > HUF_TABLELOG_MAX)
- return ERROR(GENERIC); /* check fit into table */
- for (n = 0; n <= nonNullRank; n++)
- nbPerRank[huffNode[n].nbBits]++;
-		/* determine starting value per rank */
- {
- U16 min = 0;
- for (n = maxNbBits; n > 0; n--) {
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- }
- }
- for (n = 0; n <= maxSymbolValue; n++)
- tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
- for (n = 0; n <= maxSymbolValue; n++)
- tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
- }
-
- return maxNbBits;
-}
-
-static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
- size_t nbBits = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- nbBits += CTable[s].nbBits * count[s];
- }
- return nbBits >> 3;
-}
-
-static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
- }
- return !bad;
-}
-
-static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
-{
- BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
-
-size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
-
-#define HUF_FLUSHBITS(s) BIT_flushBits(s)
-
-#define HUF_FLUSHBITS_1(stream) \
- if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
- HUF_FLUSHBITS(stream)
-
-#define HUF_FLUSHBITS_2(stream) \
- if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
- HUF_FLUSHBITS(stream)
-
-size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
-{
- const BYTE *ip = (const BYTE *)src;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
- size_t n;
- BIT_CStream_t bitC;
-
- /* init */
- if (dstSize < 8)
- return 0; /* not enough space to compress */
- {
- size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
- if (HUF_isError(initErr))
- return 0;
- }
-
- n = srcSize & ~3; /* join to mod 4 */
- switch (srcSize & 3) {
- case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
- /* fall through */
- case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
- /* fall through */
- case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
- case 0:
- default:;
- }
-
- for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
- HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
- HUF_FLUSHBITS(&bitC);
- }
-
- return BIT_closeCStream(&bitC);
-}
-
-size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
-{
- size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const iend = ip + srcSize;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
-
- if (dstSize < 6 + 1 + 1 + 1 + 8)
- return 0; /* minimum space to compress successfully */
- if (srcSize < 12)
- return 0; /* no saving possible : too small input */
- op += 6; /* jumpTable */
-
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart + 2, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart + 4, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
- if (cSize == 0)
- return 0;
- op += cSize;
- }
-
- return op - ostart;
-}
-
-static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
- const HUF_CElt *CTable)
-{
- size_t const cSize =
- singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
- if (HUF_isError(cSize)) {
- return cSize;
- }
- if (cSize == 0) {
- return 0;
-	} /* incompressible */
- op += cSize;
- /* check compressibility */
- if ((size_t)(op - ostart) >= srcSize - 1) {
- return 0;
- }
- return op - ostart;
-}
-
-/* `workSpace` must be a table of at least 1024 unsigned */
-static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
- unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
-
- U32 *count;
- size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
- HUF_CElt *CTable;
- size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
-
- /* checks & inits */
- if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
- return ERROR(GENERIC);
- if (!srcSize)
- return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
- if (!dstSize)
- return 0; /* cannot fit within dst budget */
- if (srcSize > HUF_BLOCKSIZE_MAX)
- return ERROR(srcSize_wrong); /* curr block size limit */
- if (huffLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
- if (!maxSymbolValue)
- maxSymbolValue = HUF_SYMBOLVALUE_MAX;
- if (!huffLog)
- huffLog = HUF_TABLELOG_DEFAULT;
-
- count = (U32 *)workSpace;
- workSpace = (BYTE *)workSpace + countSize;
- wkspSize -= countSize;
- CTable = (HUF_CElt *)workSpace;
- workSpace = (BYTE *)workSpace + CTableSize;
- wkspSize -= CTableSize;
-
-	/* Heuristic : If we don't need to check the validity of the old table, use the old table for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
-
- /* Scan input and build symbol stats */
- {
- CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
- if (largest == srcSize) {
- *ostart = ((const BYTE *)src)[0];
- return 1;
- } /* single symbol, rle */
- if (largest <= (srcSize >> 7) + 1)
- return 0; /* Fast heuristic : not compressible enough */
- }
-
- /* Check validity of previous table */
- if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
- *repeat = HUF_repeat_none;
- }
- /* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
-
- /* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- {
- CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
- huffLog = (U32)maxBits;
- /* Zero the unused symbols so we can check it for validity */
- memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
- }
-
- /* Write table description header */
- {
- CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
- /* Check if using the previous table will be beneficial */
- if (repeat && *repeat != HUF_repeat_none) {
- size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
- size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
- if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
- }
- /* Use the new table */
- if (hSize + 12ul >= srcSize) {
- return 0;
- }
- op += hSize;
- if (repeat) {
- *repeat = HUF_repeat_none;
- }
- if (oldHufTable) {
- memcpy(oldHufTable, CTable, CTableSize);
- } /* Save the new table */
- }
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
-}
-
-size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
-}
-
-size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
- preferRepeat);
-}
-
-size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
-}
-
-size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
- preferRepeat);
-}
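The workspace-based entry points removed above never allocate: the caller supplies every scratch buffer. A minimal sketch of such a caller follows, again assuming the pre-removal tree; the wrapper name and allocation strategy are illustrative, not taken from the kernel sources.

#include <linux/slab.h>
#include "huf.h"

static size_t example_huf_compress(void *dst, size_t dstCapacity,
				   const void *src, size_t srcSize)
{
	/* Scratch table of HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned ints,
	 * as required by HUF_compress4X_wksp(). */
	unsigned *wksp = kmalloc_array(HUF_COMPRESS_WORKSPACE_SIZE_U32,
				       sizeof(*wksp), GFP_KERNEL);
	size_t cSize;

	if (!wksp)
		return 0;
	cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
				    0 /* maxSymbolValue: default */,
				    0 /* tableLog: default */,
				    wksp,
				    HUF_COMPRESS_WORKSPACE_SIZE_U32 * sizeof(*wksp));
	kfree(wksp);
	if (HUF_isError(cSize))
		return 0;
	/* cSize == 0 means "not compressible", cSize == 1 means the whole
	 * block reduced to a single RLE symbol; real callers handle both. */
	return cSize;
}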
diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
deleted file mode 100644
index 6526482047dc..000000000000
--- a/lib/zstd/huf_decompress.c
+++ /dev/null
@@ -1,960 +0,0 @@
-/*
- * Huffman decoder, part of New Generation Entropy library
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Dependencies
-****************************************************************/
-#include "bitstream.h" /* BIT_* */
-#include "fse.h" /* header compression */
-#include "huf.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) \
- { \
- enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/*-***************************/
-/* generic DTableDesc */
-/*-***************************/
-
-typedef struct {
- BYTE maxTableLog;
- BYTE tableType;
- BYTE tableLog;
- BYTE reserved;
-} DTableDesc;
-
-static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
-{
- DTableDesc dtd;
- memcpy(&dtd, table, sizeof(dtd));
- return dtd;
-}
-
-/*-***************************/
-/* single-symbol decoding */
-/*-***************************/
-
-typedef struct {
- BYTE byte;
- BYTE nbBits;
-} HUF_DEltX2; /* single-symbol decoding */
-
-size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 tableLog = 0;
- U32 nbSymbols = 0;
- size_t iSize;
- void *const dtPtr = DTable + 1;
- HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
-
- U32 *rankVal;
- BYTE *huffWeight;
- size_t spaceUsed32 = 0;
-
- rankVal = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
-	/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
-
- iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (HUF_isError(iSize))
- return iSize;
-
- /* Table header */
- {
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (tableLog > (U32)(dtd.maxTableLog + 1))
- return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
- dtd.tableType = 0;
- dtd.tableLog = (BYTE)tableLog;
- memcpy(DTable, &dtd, sizeof(dtd));
- }
-
- /* Calculate starting value for each rank */
- {
- U32 n, nextRankStart = 0;
- for (n = 1; n < tableLog + 1; n++) {
- U32 const curr = nextRankStart;
- nextRankStart += (rankVal[n] << (n - 1));
- rankVal[n] = curr;
- }
- }
-
- /* fill DTable */
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++) {
- U32 const w = huffWeight[n];
- U32 const length = (1 << w) >> 1;
- U32 u;
- HUF_DEltX2 D;
- D.byte = (BYTE)n;
- D.nbBits = (BYTE)(tableLog + 1 - w);
- for (u = rankVal[w]; u < rankVal[w] + length; u++)
- dt[u] = D;
- rankVal[w] += length;
- }
- }
-
- return iSize;
-}
-
-static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
- BYTE const c = dt[val].byte;
- BIT_skipBits(Dstream, dt[val].nbBits);
- return c;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
- if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
- HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
- if (ZSTD_64bits()) \
- HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
-{
- BYTE *const pStart = p;
-
- /* up to 4 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
- }
-
- /* closer to the end */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
- /* no more data to retrieve from bitstream, hence no need to reload */
- while (p < pEnd)
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
- return pEnd - pStart;
-}
-
-static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- BYTE *op = (BYTE *)dst;
- BYTE *const oend = op + dstSize;
- const void *dtPtr = DTable + 1;
- const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
- BIT_DStream_t bitD;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- {
- size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
-
- /* check */
- if (!BIT_endOfDStream(&bitD))
- return ERROR(corruption_detected);
-
- return dstSize;
-}
-
-size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0)
- return ERROR(GENERIC);
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- /* Check */
- if (cSrcSize < 10)
- return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
-
- {
- const BYTE *const istart = (const BYTE *)cSrc;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1;
- const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
-
- /* Init */
- BIT_DStream_t bitD1;
- BIT_DStream_t bitD2;
- BIT_DStream_t bitD3;
- BIT_DStream_t bitD4;
- size_t const length1 = ZSTD_readLE16(istart);
- size_t const length2 = ZSTD_readLE16(istart + 2);
- size_t const length3 = ZSTD_readLE16(istart + 4);
- size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
- const BYTE *const istart1 = istart + 6; /* jumpTable */
- const BYTE *const istart2 = istart1 + length1;
- const BYTE *const istart3 = istart2 + length2;
- const BYTE *const istart4 = istart3 + length3;
- const size_t segmentSize = (dstSize + 3) / 4;
- BYTE *const opStart2 = ostart + segmentSize;
- BYTE *const opStart3 = opStart2 + segmentSize;
- BYTE *const opStart4 = opStart3 + segmentSize;
- BYTE *op1 = ostart;
- BYTE *op2 = opStart2;
- BYTE *op3 = opStart3;
- BYTE *op4 = opStart4;
- U32 endSignal;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- if (length4 > cSrcSize)
- return ERROR(corruption_detected); /* overflow */
- {
- size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* 16-32 symbols per loop (4-8 symbols per stream) */
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- }
-
- /* check corruption */
- if (op1 > opStart2)
- return ERROR(corruption_detected);
- if (op2 > opStart3)
- return ERROR(corruption_detected);
- if (op3 > opStart4)
- return ERROR(corruption_detected);
-		/* note : op4 already verified within main loop */
-
- /* finish bitStreams one by one */
- HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
- HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
- HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
- HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
-
- /* check */
- endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
- if (!endSignal)
- return ERROR(corruption_detected);
-
- /* decoded size */
- return dstSize;
- }
-}
-
-size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0)
- return ERROR(GENERIC);
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
-}
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-typedef struct {
- U16 sequence;
- BYTE nbBits;
- BYTE length;
-} HUF_DEltX4; /* double-symbols decoding */
-
-typedef struct {
- BYTE symbol;
- BYTE weight;
-} sortedSymbol_t;
-
-/* HUF_fillDTableX4Level2() :
- * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
- const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
-{
- HUF_DEltX4 DElt;
- U32 rankVal[HUF_TABLELOG_MAX + 1];
-
- /* get pre-calculated rankVal */
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
- /* fill skipped values */
- if (minWeight > 1) {
- U32 i, skipSize = rankVal[minWeight];
- ZSTD_writeLE16(&(DElt.sequence), baseSeq);
- DElt.nbBits = (BYTE)(consumed);
- DElt.length = 1;
- for (i = 0; i < skipSize; i++)
- DTable[i] = DElt;
- }
-
- /* fill DTable */
- {
- U32 s;
- for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
- const U32 symbol = sortedSymbols[s].symbol;
- const U32 weight = sortedSymbols[s].weight;
- const U32 nbBits = nbBitsBaseline - weight;
- const U32 length = 1 << (sizeLog - nbBits);
- const U32 start = rankVal[weight];
- U32 i = start;
- const U32 end = start + length;
-
- ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
- DElt.nbBits = (BYTE)(nbBits + consumed);
- DElt.length = 2;
- do {
- DTable[i++] = DElt;
- } while (i < end); /* since length >= 1 */
-
- rankVal[weight] += length;
- }
- }
-}
-
-typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
-typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
-
-static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
- rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
-{
- U32 rankVal[HUF_TABLELOG_MAX + 1];
- const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
- const U32 minBits = nbBitsBaseline - maxWeight;
- U32 s;
-
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
- /* fill DTable */
- for (s = 0; s < sortedListSize; s++) {
- const U16 symbol = sortedList[s].symbol;
- const U32 weight = sortedList[s].weight;
- const U32 nbBits = nbBitsBaseline - weight;
- const U32 start = rankVal[weight];
- const U32 length = 1 << (targetLog - nbBits);
-
- if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
- U32 sortedRank;
- int minWeight = nbBits + scaleLog;
- if (minWeight < 1)
- minWeight = 1;
- sortedRank = rankStart[minWeight];
- HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
- sortedListSize - sortedRank, nbBitsBaseline, symbol);
- } else {
- HUF_DEltX4 DElt;
- ZSTD_writeLE16(&(DElt.sequence), symbol);
- DElt.nbBits = (BYTE)(nbBits);
- DElt.length = 1;
- {
- U32 const end = start + length;
- U32 u;
- for (u = start; u < end; u++)
- DTable[u] = DElt;
- }
- }
- rankVal[weight] += length;
- }
-}
-
-size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 tableLog, maxW, sizeOfSort, nbSymbols;
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- U32 const maxTableLog = dtd.maxTableLog;
- size_t iSize;
- void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
- HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
- U32 *rankStart;
-
- rankValCol_t *rankVal;
- U32 *rankStats;
- U32 *rankStart0;
- sortedSymbol_t *sortedSymbol;
- BYTE *weightList;
- size_t spaceUsed32 = 0;
-
- HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
-
- rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
- rankStats = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 1;
- rankStart0 = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 2;
- sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
- weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- rankStart = rankStart0 + 1;
- memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
-
- HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
- if (maxTableLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
-	/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
-
- iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (HUF_isError(iSize))
- return iSize;
-
- /* check result */
- if (tableLog > maxTableLog)
- return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
-
- /* find maxWeight */
- for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
- } /* necessarily finds a solution before 0 */
-
- /* Get start index of each weight */
- {
- U32 w, nextRankStart = 0;
- for (w = 1; w < maxW + 1; w++) {
- U32 curr = nextRankStart;
- nextRankStart += rankStats[w];
- rankStart[w] = curr;
- }
- rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
- sizeOfSort = nextRankStart;
- }
-
- /* sort symbols by weight */
- {
- U32 s;
- for (s = 0; s < nbSymbols; s++) {
- U32 const w = weightList[s];
- U32 const r = rankStart[w]++;
- sortedSymbol[r].symbol = (BYTE)s;
- sortedSymbol[r].weight = (BYTE)w;
- }
- rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
- }
-
- /* Build rankVal */
- {
- U32 *const rankVal0 = rankVal[0];
- {
- int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
- U32 nextRankVal = 0;
- U32 w;
- for (w = 1; w < maxW + 1; w++) {
- U32 curr = nextRankVal;
- nextRankVal += rankStats[w] << (w + rescale);
- rankVal0[w] = curr;
- }
- }
- {
- U32 const minBits = tableLog + 1 - maxW;
- U32 consumed;
- for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
- U32 *const rankValPtr = rankVal[consumed];
- U32 w;
- for (w = 1; w < maxW + 1; w++) {
- rankValPtr[w] = rankVal0[w] >> consumed;
- }
- }
- }
- }
-
- HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
-
- dtd.tableLog = (BYTE)maxTableLog;
- dtd.tableType = 1;
- memcpy(DTable, &dtd, sizeof(dtd));
- return iSize;
-}
-
-static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt + val, 2);
- BIT_skipBits(DStream, dt[val].nbBits);
- return dt[val].length;
-}
-
-static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt + val, 1);
- if (dt[val].length == 1)
- BIT_skipBits(DStream, dt[val].nbBits);
- else {
- if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
- BIT_skipBits(DStream, dt[val].nbBits);
- if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
- /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
- DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
- }
- }
- return 1;
-}
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
- if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
- ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
- if (ZSTD_64bits()) \
- ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
-{
- BYTE *const pStart = p;
-
- /* up to 8 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
- HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
- }
-
- /* closer to end : up to 2 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
- while (p <= pEnd - 2)
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
-
- if (p < pEnd)
- p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
- return p - pStart;
-}
-
-static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- BIT_DStream_t bitD;
-
- /* Init */
- {
- size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* decode */
- {
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
- const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
- }
-
- /* check */
- if (!BIT_endOfDStream(&bitD))
- return ERROR(corruption_detected);
-
- /* decoded size */
- return dstSize;
-}
-
-size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1)
- return ERROR(GENERIC);
- return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- if (cSrcSize < 10)
- return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
-
- {
- const BYTE *const istart = (const BYTE *)cSrc;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1;
- const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
-
- /* Init */
- BIT_DStream_t bitD1;
- BIT_DStream_t bitD2;
- BIT_DStream_t bitD3;
- BIT_DStream_t bitD4;
- size_t const length1 = ZSTD_readLE16(istart);
- size_t const length2 = ZSTD_readLE16(istart + 2);
- size_t const length3 = ZSTD_readLE16(istart + 4);
- size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
- const BYTE *const istart1 = istart + 6; /* jumpTable */
- const BYTE *const istart2 = istart1 + length1;
- const BYTE *const istart3 = istart2 + length2;
- const BYTE *const istart4 = istart3 + length3;
- size_t const segmentSize = (dstSize + 3) / 4;
- BYTE *const opStart2 = ostart + segmentSize;
- BYTE *const opStart3 = opStart2 + segmentSize;
- BYTE *const opStart4 = opStart3 + segmentSize;
- BYTE *op1 = ostart;
- BYTE *op2 = opStart2;
- BYTE *op3 = opStart3;
- BYTE *op4 = opStart4;
- U32 endSignal;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- if (length4 > cSrcSize)
- return ERROR(corruption_detected); /* overflow */
- {
- size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* 16-32 symbols per loop (4-8 symbols per stream) */
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
- HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- }
-
- /* check corruption */
- if (op1 > opStart2)
- return ERROR(corruption_detected);
- if (op2 > opStart3)
- return ERROR(corruption_detected);
- if (op3 > opStart4)
- return ERROR(corruption_detected);
- /* note : op4 already verified within main loop */
-
- /* finish bitStreams one by one */
- HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
- HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
- HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
- HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
-
- /* check */
- {
- U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
- if (!endCheck)
- return ERROR(corruption_detected);
- }
-
- /* decoded size */
- return dstSize;
- }
-}
-
-size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1)
- return ERROR(GENERIC);
- return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
-}
-
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
-
-size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
- : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
- : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-typedef struct {
- U32 tableTime;
- U32 decode256Time;
-} algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
- /* single, double, quad */
- {{0, 0}, {1, 1}, {2, 2}}, /* Q==0 : impossible */
- {{0, 0}, {1, 1}, {2, 2}}, /* Q==1 : impossible */
- {{38, 130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
- {{448, 128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
- {{556, 128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
- {{714, 128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
- {{883, 128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
- {{897, 128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
- {{926, 128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
- {{947, 128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
- {{1107, 128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
- {{1177, 128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
- {{1242, 128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
- {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
- {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
- {{722, 128}, {1891, 145}, {1936, 146}}, /* Q ==15 : 93-99% */
-};
-
-/** HUF_selectDecoder() :
-* Tells which decoder is likely to decode faster,
-* based on a set of pre-determined metrics.
-* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4.
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
-{
- /* decoder timing evaluation */
- U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
- U32 const D256 = (U32)(dstSize >> 8);
- U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
- U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
- DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
-
- return DTime1 < DTime0;
-}
-
-typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
-
-size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize)
- return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) {
- memcpy(dst, cSrc, dstSize);
- return dstSize;
- } /* not compressed */
- if (cSrcSize == 1) {
- memset(dst, *(const BYTE *)cSrc, dstSize);
- return dstSize;
- } /* RLE */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
-
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
- return ERROR(corruption_detected); /* invalid */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize)
- return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) {
- memcpy(dst, cSrc, dstSize);
- return dstSize;
- } /* not compressed */
- if (cSrcSize == 1) {
- memset(dst, *(const BYTE *)cSrc, dstSize);
- return dstSize;
- } /* RLE */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
deleted file mode 100644
index 3a0f34c8706c..000000000000
--- a/lib/zstd/mem.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-/*-****************************************
-* Dependencies
-******************************************/
-#include <asm/unaligned.h>
-#include <linux/string.h> /* memcpy */
-#include <linux/types.h> /* size_t, ptrdiff_t */
-
-/*-****************************************
-* Compiler specifics
-******************************************/
-#define ZSTD_STATIC static __inline __attribute__((unused))
-
-/*-**************************************************************
-* Basic Types
-*****************************************************************/
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef int64_t S64;
-typedef ptrdiff_t iPtrDiff;
-typedef uintptr_t uPtrDiff;
-
-/*-**************************************************************
-* Memory I/O
-*****************************************************************/
-ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
-ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
-
-#if defined(__LITTLE_ENDIAN)
-#define ZSTD_LITTLE_ENDIAN 1
-#else
-#define ZSTD_LITTLE_ENDIAN 0
-#endif
-
-ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
-
-ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
-
-ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
-
-ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
-
-/*=== Little endian r/w ===*/
-
-ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
-
-ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
-
-ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
-{
- ZSTD_writeLE16(memPtr, (U16)val);
- ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
-}
-
-ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
-{
- if (ZSTD_32bits())
- return (size_t)ZSTD_readLE32(memPtr);
- else
- return (size_t)ZSTD_readLE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
-{
- if (ZSTD_32bits())
- ZSTD_writeLE32(memPtr, (U32)val);
- else
- ZSTD_writeLE64(memPtr, (U64)val);
-}
-
-/*=== Big endian r/w ===*/
-
-ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
-{
- if (ZSTD_32bits())
- return (size_t)ZSTD_readBE32(memPtr);
- else
- return (size_t)ZSTD_readBE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
-{
- if (ZSTD_32bits())
- ZSTD_writeBE32(memPtr, (U32)val);
- else
- ZSTD_writeBE64(memPtr, (U64)val);
-}
-
-/* function safe only for comparisons */
-ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
-{
- switch (length) {
- default:
- case 4: return ZSTD_read32(memPtr);
- case 3:
- if (ZSTD_isLittleEndian())
- return ZSTD_read32(memPtr) << 8;
- else
- return ZSTD_read32(memPtr) >> 8;
- }
-}
-
-#endif /* MEM_H_MODULE */
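
A note on the 3-byte case of ZSTD_readMINMATCH() above: it loads a full 32-bit word and shifts the extra byte out, and because the shift direction depends on endianness the result is only meaningful for equality tests, exactly as the comment warns. A standalone sketch of the same trick (check_3byte_match is a hypothetical helper, for illustration only):

#include <stdint.h>
#include <string.h>

/* Compare the first 3 bytes of a and b by loading 4 bytes and discarding
 * the byte that does not belong to the match. Both buffers must have at
 * least 4 readable bytes.
 */
static int check_3byte_match(const void *a, const void *b)
{
	uint32_t wa, wb;

	memcpy(&wa, a, sizeof(wa));
	memcpy(&wb, b, sizeof(wb));
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
	return (wa << 8) == (wb << 8);	/* byte 3 is the top lane: shift it out */
#else
	return (wa >> 8) == (wb >> 8);	/* byte 3 is the bottom lane: shift it out */
#endif
}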
diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
deleted file mode 100644
index a282624ee155..000000000000
--- a/lib/zstd/zstd_common.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
-#include <linux/kernel.h>
-
-/*=**************************************************************
-* Custom allocator
-****************************************************************/
-
-#define stack_push(stack, size) \
- ({ \
- void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
- (stack)->ptr = (char *)ptr + (size); \
- (stack)->ptr <= (stack)->end ? ptr : NULL; \
- })
-
-ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
- ZSTD_stack *stack = (ZSTD_stack *)workspace;
- /* Verify preconditions */
- if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
- ZSTD_customMem error = {NULL, NULL, NULL};
- return error;
- }
- /* Initialize the stack */
- stack->ptr = workspace;
- stack->end = (char *)workspace + workspaceSize;
- stack_push(stack, sizeof(ZSTD_stack));
- return stackMem;
-}
-
-void *ZSTD_stackAllocAll(void *opaque, size_t *size)
-{
- ZSTD_stack *stack = (ZSTD_stack *)opaque;
- *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
- return stack_push(stack, *size);
-}
-
-void *ZSTD_stackAlloc(void *opaque, size_t size)
-{
- ZSTD_stack *stack = (ZSTD_stack *)opaque;
- return stack_push(stack, size);
-}
-void ZSTD_stackFree(void *opaque, void *address)
-{
- (void)opaque;
- (void)address;
-}
-
-void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
-
-void ZSTD_free(void *ptr, ZSTD_customMem customMem)
-{
- if (ptr != NULL)
- customMem.customFree(customMem.opaque, ptr);
-}
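
The allocator above is a simple bump ("stack") allocator over one caller-supplied workspace: ZSTD_initStack() reserves the ZSTD_stack header at the front, ZSTD_stackAlloc() hands out size_t-aligned slices until the workspace is exhausted, and ZSTD_stackFree() is deliberately a no-op because the whole workspace is released at once. A minimal sketch of the calling pattern (stack_alloc_demo and the sizes are illustrative only):

#include <linux/errno.h>
#include <linux/mm.h>

#include "zstd_internal.h"	/* ZSTD_initStack, ZSTD_malloc, ZSTD_free */

static int stack_alloc_demo(size_t workspace_size)
{
	void *workspace = kvmalloc(workspace_size, GFP_KERNEL);
	ZSTD_customMem mem;
	void *a, *b;

	if (!workspace)
		return -ENOMEM;
	mem = ZSTD_initStack(workspace, workspace_size);
	if (!mem.customAlloc) {		/* workspace too small or misaligned */
		kvfree(workspace);
		return -EINVAL;
	}
	a = ZSTD_malloc(128, mem);	/* bumped off the front of the workspace */
	b = ZSTD_malloc(4096, mem);	/* NULL once the workspace is used up */
	ZSTD_free(a, mem);		/* no-ops: everything is reclaimed ... */
	ZSTD_free(b, mem);
	kvfree(workspace);		/* ... by freeing the workspace itself */
	return (a && b) ? 0 : -ENOMEM;
}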
diff --git a/lib/zstd/zstd_common_module.c b/lib/zstd/zstd_common_module.c
new file mode 100644
index 000000000000..22686e367e6f
--- /dev/null
+++ b/lib/zstd/zstd_common_module.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/module.h>
+
+#include "common/huf.h"
+#include "common/fse.h"
+#include "common/zstd_internal.h"
+
+// Export symbols shared by compress and decompress into a common module
+
+#undef ZSTD_isError /* defined within zstd_internal.h */
+EXPORT_SYMBOL_GPL(FSE_readNCount);
+EXPORT_SYMBOL_GPL(HUF_readStats);
+EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
+EXPORT_SYMBOL_GPL(ZSTD_isError);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
+EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
+EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
+EXPORT_SYMBOL_GPL(ZSTD_customFree);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Common");
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
new file mode 100644
index 000000000000..04e1b5c01d9b
--- /dev/null
+++ b/lib/zstd/zstd_compress_module.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+#include "common/zstd_internal.h"
+
+#define ZSTD_FORWARD_IF_ERR(ret) \
+ do { \
+ size_t const __ret = (ret); \
+ if (ZSTD_isError(__ret)) \
+ return __ret; \
+ } while (0)
+
+static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters,
+ unsigned long long pledged_src_size)
+{
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(
+ cctx, ZSTD_reset_session_and_parameters));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(
+ cctx, pledged_src_size));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_windowLog, parameters->cParams.windowLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_hashLog, parameters->cParams.hashLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_chainLog, parameters->cParams.chainLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_searchLog, parameters->cParams.searchLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_minMatch, parameters->cParams.minMatch));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_targetLength, parameters->cParams.targetLength));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_strategy, parameters->cParams.strategy));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag));
+ return 0;
+}
+
+int zstd_min_clevel(void)
+{
+ return ZSTD_minCLevel();
+}
+EXPORT_SYMBOL(zstd_min_clevel);
+
+int zstd_max_clevel(void)
+{
+ return ZSTD_maxCLevel();
+}
+EXPORT_SYMBOL(zstd_max_clevel);
+
+size_t zstd_compress_bound(size_t src_size)
+{
+ return ZSTD_compressBound(src_size);
+}
+EXPORT_SYMBOL(zstd_compress_bound);
+
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size)
+{
+ return ZSTD_getParams(level, estimated_src_size, 0);
+}
+EXPORT_SYMBOL(zstd_get_params);
+
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCCtxSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cctx_workspace_bound);
+
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticCCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_cctx);
+
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters)
+{
+ ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size));
+ return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_compress_cctx);
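
Taken together, the exports above form the one-shot path: pick parameters with zstd_get_params(), size a workspace with zstd_cctx_workspace_bound(), build the context in it with zstd_init_cctx(), then call zstd_compress_cctx(). A minimal sketch of a caller (demo_compress, the level, and the error handling are illustrative only):

#include <linux/mm.h>
#include <linux/zstd.h>

/* Compress src into dst; returns the compressed size, or 0 on failure. */
static size_t demo_compress(void *dst, size_t dst_cap,
			    const void *src, size_t src_len)
{
	zstd_parameters params = zstd_get_params(3, src_len);	/* level 3 */
	size_t const wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_cctx *cctx;
	size_t ret = 0;

	if (!wksp)
		return 0;
	cctx = zstd_init_cctx(wksp, wksp_size);
	if (cctx) {
		ret = zstd_compress_cctx(cctx, dst, dst_cap, src, src_len, &params);
		if (zstd_is_error(ret))
			ret = 0;	/* callers typically fall back to raw storage */
	}
	kvfree(wksp);
	return ret;
}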
+
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCStreamSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cstream_workspace_bound);
+
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
+{
+ zstd_cstream *cstream;
+
+ if (workspace == NULL)
+ return NULL;
+
+ cstream = ZSTD_initStaticCStream(workspace, workspace_size);
+ if (cstream == NULL)
+ return NULL;
+
+	/* 0 means "unknown size" in the linux zstd API, but "empty source" in the upstream zstd API */
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+
+ if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size)))
+ return NULL;
+
+ return cstream;
+}
+EXPORT_SYMBOL(zstd_init_cstream);
+
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size)
+{
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+	ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only));
+	ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size));
+ return 0;
+}
+EXPORT_SYMBOL(zstd_reset_cstream);
+
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_compressStream(cstream, output, input);
+}
+EXPORT_SYMBOL(zstd_compress_stream);
+
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_flushStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_flush_stream);
+
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_endStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_end_stream);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Compressor");
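
The streaming entry points follow the same workspace discipline: bound the workspace from the chosen parameters with zstd_cstream_workspace_bound(), hand it to zstd_init_cstream(), then loop over zstd_compress_stream() and finish the frame with zstd_end_stream(). A single-pass sketch, assuming the whole input is already in memory (demo_compress_stream is illustrative only):

#include <linux/mm.h>
#include <linux/zstd.h>

/* Stream src into dst in one pass; returns bytes written, or 0 on failure. */
static size_t demo_compress_stream(void *dst, size_t dst_cap,
				   const void *src, size_t src_len)
{
	zstd_parameters params = zstd_get_params(3, src_len);
	size_t const wksp_size = zstd_cstream_workspace_bound(&params.cParams);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_cap, .pos = 0 };
	zstd_cstream *cstream;
	size_t ret = 0;

	if (!wksp)
		return 0;
	cstream = zstd_init_cstream(&params, src_len, wksp, wksp_size);
	if (cstream &&
	    !zstd_is_error(zstd_compress_stream(cstream, &out, &in)) &&
	    zstd_end_stream(cstream, &out) == 0)	/* 0 => frame fully flushed */
		ret = out.pos;
	kvfree(wksp);
	return ret;
}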
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
new file mode 100644
index 000000000000..f4ed952ed485
--- /dev/null
+++ b/lib/zstd/zstd_decompress_module.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+
+/* Common symbols. zstd_compress must depend on zstd_decompress. */
+
+unsigned int zstd_is_error(size_t code)
+{
+ return ZSTD_isError(code);
+}
+EXPORT_SYMBOL(zstd_is_error);
+
+zstd_error_code zstd_get_error_code(size_t code)
+{
+ return ZSTD_getErrorCode(code);
+}
+EXPORT_SYMBOL(zstd_get_error_code);
+
+const char *zstd_get_error_name(size_t code)
+{
+ return ZSTD_getErrorName(code);
+}
+EXPORT_SYMBOL(zstd_get_error_name);
+
+/* Decompression symbols. */
+
+size_t zstd_dctx_workspace_bound(void)
+{
+ return ZSTD_estimateDCtxSize();
+}
+EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticDCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dctx);
+
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size)
+{
+ return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_decompress_dctx);
+
+size_t zstd_dstream_workspace_bound(size_t max_window_size)
+{
+ return ZSTD_estimateDStreamSize(max_window_size);
+}
+EXPORT_SYMBOL(zstd_dstream_workspace_bound);
+
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ (void)max_window_size;
+ return ZSTD_initStaticDStream(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dstream);
+
+size_t zstd_reset_dstream(zstd_dstream *dstream)
+{
+ return ZSTD_resetDStream(dstream);
+}
+EXPORT_SYMBOL(zstd_reset_dstream);
+
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_decompressStream(dstream, output, input);
+}
+EXPORT_SYMBOL(zstd_decompress_stream);
+
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
+{
+ return ZSTD_findFrameCompressedSize(src, src_size);
+}
+EXPORT_SYMBOL(zstd_find_frame_compressed_size);
+
+size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
+ size_t src_size)
+{
+ return ZSTD_getFrameHeader(header, src, src_size);
+}
+EXPORT_SYMBOL(zstd_get_frame_header);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Decompressor");
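
On the decompression side the pattern is symmetric: size a workspace with zstd_dctx_workspace_bound() (or zstd_dstream_workspace_bound() for streaming), build the context in it, then decode. A one-shot sketch with zstd_decompress_dctx(); demo_decompress is illustrative only, and the caller must already know an upper bound on the decoded size:

#include <linux/mm.h>
#include <linux/zstd.h>

/* Decode one frame into dst; returns the decoded size, or 0 on failure. */
static size_t demo_decompress(void *dst, size_t dst_cap,
			      const void *src, size_t src_len)
{
	size_t const wksp_size = zstd_dctx_workspace_bound();
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_dctx *dctx;
	size_t ret = 0;

	if (!wksp)
		return 0;
	dctx = zstd_init_dctx(wksp, wksp_size);
	if (dctx) {
		ret = zstd_decompress_dctx(dctx, dst, dst_cap, src, src_len);
		if (zstd_is_error(ret))
			ret = 0;
	}
	kvfree(wksp);
	return ret;
}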
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
deleted file mode 100644
index 1a79fab9e13a..000000000000
--- a/lib/zstd/zstd_internal.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-/*-*******************************************************
-* Compiler specifics
-*********************************************************/
-#define FORCE_INLINE static __always_inline
-#define FORCE_NOINLINE static noinline
-					/* repcode detected, we should take it */
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "mem.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/xxhash.h>
-#include <linux/zstd.h>
-
-/*-*************************************
-* shared macros
-***************************************/
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#define CHECK_F(f) \
- { \
- size_t const errcod = f; \
- if (ERR_isError(errcod)) \
- return errcod; \
- } /* check and Forward error code */
-#define CHECK_E(f, e) \
- { \
- size_t const errcod = f; \
- if (ERR_isError(errcod)) \
- return ERROR(e); \
- } /* check and send Error code */
-#define ZSTD_STATIC_ASSERT(c) \
- { \
- enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
- }
-
-/*-*************************************
-* Common constants
-***************************************/
-#define ZSTD_OPT_NUM (1 << 12)
-#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
-
-#define ZSTD_REP_NUM 3 /* number of repcodes */
-#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
-#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
-#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
-static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
-
-#define KB *(1 << 10)
-#define MB *(1 << 20)
-#define GB *(1U << 30)
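
The KB/MB/GB macros above begin with a deliberate '*' so that a size literal reads like a unit: "128 KB" expands to "128 * (1 << 10)". A tiny illustration of the idiom (demo code only):

#define KB *(1 << 10)					/* same definition as above */

static const unsigned long demo_window_size = 512 KB;	/* 512 * (1 << 10) = 524288 */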
-
-#define BIT7 128
-#define BIT6 64
-#define BIT5 32
-#define BIT4 16
-#define BIT1 2
-#define BIT0 1
-
-#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
-static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
-
-#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
-typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
-
-#define HufLog 12
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-#define EQUAL_READ32 4
-
-#define Litbits 8
-#define MaxLit ((1 << Litbits) - 1)
-#define MaxML 52
-#define MaxLL 35
-#define MaxOff 28
-#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog 9
-#define LLFSELog 9
-#define OffFSELog 8
-
-static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
-#define LL_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
-#define ML_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
-#define OF_DEFAULTNORMLOG 5 /* for static allocation */
-static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
-
-/*-*******************************************
-* Shared functions to include for inlining
-*********************************************/
-ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
- memcpy(dst, src, 8);
-}
-/*! ZSTD_wildcopy() :
-* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
-{
- const BYTE* ip = (const BYTE*)src;
- BYTE* op = (BYTE*)dst;
- BYTE* const oend = op + length;
- /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
- * Avoid the bad case where the loop only runs once by handling the
- * special case separately. This doesn't trigger the bug because it
- * doesn't involve pointer/integer overflow.
- */
- if (length <= 8)
- return ZSTD_copy8(dst, src);
- do {
- ZSTD_copy8(op, ip);
- op += 8;
- ip += 8;
- } while (op < oend);
-}
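
Because ZSTD_wildcopy() always copies in whole 8-byte chunks, callers must leave WILDCOPY_OVERLENGTH bytes of slack past the true end of the destination (and have the same slack readable after the source); in exchange the hot copy loop needs no tail handling. A small sketch of the sizing rule a caller follows (demo names only):

#define DEMO_PAYLOAD 100

/* 'in' must have at least DEMO_PAYLOAD + WILDCOPY_OVERLENGTH readable bytes. */
static void demo_wildcopy(const BYTE *in)
{
	BYTE out[DEMO_PAYLOAD + WILDCOPY_OVERLENGTH];	/* slack for the overshoot */

	ZSTD_wildcopy(out, in, DEMO_PAYLOAD);	/* may write up to DEMO_PAYLOAD + 7 bytes */
}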
-
-/*-*******************************************
-* Private interfaces
-*********************************************/
-typedef struct ZSTD_stats_s ZSTD_stats_t;
-
-typedef struct {
- U32 off;
- U32 len;
-} ZSTD_match_t;
-
-typedef struct {
- U32 price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_optimal_t;
-
-typedef struct seqDef_s {
- U32 offset;
- U16 litLength;
- U16 matchLength;
-} seqDef;
-
-typedef struct {
- seqDef *sequencesStart;
- seqDef *sequences;
- BYTE *litStart;
- BYTE *lit;
- BYTE *llCode;
- BYTE *mlCode;
- BYTE *ofCode;
- U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
- U32 longLengthPos;
- /* opt */
- ZSTD_optimal_t *priceTable;
- ZSTD_match_t *matchTable;
- U32 *matchLengthFreq;
- U32 *litLengthFreq;
- U32 *litFreq;
- U32 *offCodeFreq;
- U32 matchLengthSum;
- U32 matchSum;
- U32 litLengthSum;
- U32 litSum;
- U32 offCodeSum;
- U32 log2matchLengthSum;
- U32 log2matchSum;
- U32 log2litLengthSum;
- U32 log2litSum;
- U32 log2offCodeSum;
- U32 factor;
- U32 staticPrices;
- U32 cachedPrice;
- U32 cachedLitLength;
- const BYTE *cachedLiterals;
-} seqStore_t;
-
-const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
-void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
-int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
-
-/*= Custom memory allocation functions */
-typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
-typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
-typedef struct {
- ZSTD_allocFunction customAlloc;
- ZSTD_freeFunction customFree;
- void *opaque;
-} ZSTD_customMem;
-
-void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_free(void *ptr, ZSTD_customMem customMem);
-
-/*====== stack allocation ======*/
-
-typedef struct {
- void *ptr;
- const void *end;
-} ZSTD_stack;
-
-#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
-#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
-
-ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
-
-void *ZSTD_stackAllocAll(void *opaque, size_t *size);
-void *ZSTD_stackAlloc(void *opaque, size_t size);
-void ZSTD_stackFree(void *opaque, void *address);
-
-/*====== common function ======*/
-
-ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
-
-/* hidden functions */
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
-
-size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
-size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
-size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
-size_t ZSTD_freeDDict(ZSTD_DDict *cdict);
-size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
-size_t ZSTD_freeDStream(ZSTD_DStream *zds);
-
-#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
deleted file mode 100644
index 55e1b4cba808..000000000000
--- a/lib/zstd/zstd_opt.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-/**
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* Note : this file is intended to be included within zstd_compress.c */
-
-#ifndef ZSTD_OPT_H_91842398743
-#define ZSTD_OPT_H_91842398743
-
-#define ZSTD_LITFREQ_ADD 2
-#define ZSTD_FREQ_DIV 4
-#define ZSTD_MAX_PRICE (1 << 30)
-
-/*-*************************************
-* Price functions for optimal parser
-***************************************/
-FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
-{
- ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
- ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
- ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
- ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
- ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
-}
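
The cached log2* fields turn the frequency tables into approximate bit costs: a symbol seen f times out of a total s is priced at roughly highbit32(s + 1) - highbit32(f + 1) ≈ -log2(f / s) bits, which is what ZSTD_getLiteralPrice() and ZSTD_getPrice() below accumulate per literal and per code. A tiny standalone check of the approximation (illustrative only):

#include <stdio.h>

static unsigned highbit32(unsigned v) { return 31 - __builtin_clz(v); }

int main(void)
{
	/* A literal seen 16 times out of 1024 should cost about 6 bits. */
	unsigned const freq = 16, sum = 1024;
	unsigned const price = highbit32(sum + 1) - highbit32(freq + 1);

	printf("approx cost: %u bits (exact: -log2(16/1024) = 6)\n", price);
	return 0;
}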
-
-ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
-{
- unsigned u;
-
- ssPtr->cachedLiterals = NULL;
- ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
- ssPtr->staticPrices = 0;
-
- if (ssPtr->litLengthSum == 0) {
- if (srcSize <= 1024)
- ssPtr->staticPrices = 1;
-
- for (u = 0; u <= MaxLit; u++)
- ssPtr->litFreq[u] = 0;
- for (u = 0; u < srcSize; u++)
- ssPtr->litFreq[src[u]]++;
-
- ssPtr->litSum = 0;
- ssPtr->litLengthSum = MaxLL + 1;
- ssPtr->matchLengthSum = MaxML + 1;
- ssPtr->offCodeSum = (MaxOff + 1);
- ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
-
- for (u = 0; u <= MaxLit; u++) {
- ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->litSum += ssPtr->litFreq[u];
- }
- for (u = 0; u <= MaxLL; u++)
- ssPtr->litLengthFreq[u] = 1;
- for (u = 0; u <= MaxML; u++)
- ssPtr->matchLengthFreq[u] = 1;
- for (u = 0; u <= MaxOff; u++)
- ssPtr->offCodeFreq[u] = 1;
- } else {
- ssPtr->matchLengthSum = 0;
- ssPtr->litLengthSum = 0;
- ssPtr->offCodeSum = 0;
- ssPtr->matchSum = 0;
- ssPtr->litSum = 0;
-
- for (u = 0; u <= MaxLit; u++) {
- ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
- ssPtr->litSum += ssPtr->litFreq[u];
- }
- for (u = 0; u <= MaxLL; u++) {
- ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
- ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
- }
- for (u = 0; u <= MaxML; u++) {
- ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
- ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
- }
- ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
- for (u = 0; u <= MaxOff; u++) {
- ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
- }
- }
-
- ZSTD_setLog2Prices(ssPtr);
-}
-
-FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
-{
- U32 price, u;
-
- if (ssPtr->staticPrices)
- return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
-
- if (litLength == 0)
- return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
-
- /* literals */
- if (ssPtr->cachedLiterals == literals) {
- U32 const additional = litLength - ssPtr->cachedLitLength;
- const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
- price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
- for (u = 0; u < additional; u++)
- price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
- ssPtr->cachedPrice = price;
- ssPtr->cachedLitLength = litLength;
- } else {
- price = litLength * ssPtr->log2litSum;
- for (u = 0; u < litLength; u++)
- price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
-
- if (litLength >= 12) {
- ssPtr->cachedLiterals = literals;
- ssPtr->cachedPrice = price;
- ssPtr->cachedLitLength = litLength;
- }
- }
-
- /* literal Length */
- {
- const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
- }
-
- return price;
-}
-
-FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
-{
- /* offset */
- U32 price;
- BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
-
- if (seqStorePtr->staticPrices)
- return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
-
- price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
- if (!ultra && offCode >= 20)
- price += (offCode - 19) * 2;
-
- /* match Length */
- {
- const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
- }
-
- return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
-}
-
-ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
-{
- U32 u;
-
- /* literals */
- seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
- for (u = 0; u < litLength; u++)
- seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
-
- /* literal Length */
- {
- const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- seqStorePtr->litLengthFreq[llCode]++;
- seqStorePtr->litLengthSum++;
- }
-
- /* match offset */
- {
- BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
- seqStorePtr->offCodeSum++;
- seqStorePtr->offCodeFreq[offCode]++;
- }
-
- /* match Length */
- {
- const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- seqStorePtr->matchLengthFreq[mlCode]++;
- seqStorePtr->matchLengthSum++;
- }
-
- ZSTD_setLog2Prices(seqStorePtr);
-}
-
-#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
- { \
- while (last_pos < pos) { \
- opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
- last_pos++; \
- } \
- opt[pos].mlen = mlen_; \
- opt[pos].off = offset_; \
- opt[pos].litlen = litlen_; \
- opt[pos].price = price_; \
- }
-
-/* Update hashTable3 up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE
-U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
-{
- U32 *const hashTable3 = zc->hashTable3;
- U32 const hashLog3 = zc->hashLog3;
- const BYTE *const base = zc->base;
- U32 idx = zc->nextToUpdate3;
- const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
- const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
-
- while (idx < target) {
- hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
- idx++;
- }
-
- return hashTable3[hash3];
-}
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- const BYTE *const base = zc->base;
- const U32 curr = (U32)(ip - base);
- const U32 hashLog = zc->params.cParams.hashLog;
- const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const hashTable = zc->hashTable;
- U32 matchIndex = hashTable[h];
- U32 *const bt = zc->chainTable;
- const U32 btLog = zc->params.cParams.chainLog - 1;
- const U32 btMask = (1U << btLog) - 1;
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- const U32 windowLow = zc->lowLimit;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
- U32 matchEndIdx = curr + 8;
- U32 dummy32; /* to be nullified at the end */
- U32 mnum = 0;
-
- const U32 minMatch = (mls == 3) ? 3 : 4;
- size_t bestLength = minMatchLen - 1;
-
- if (minMatch == 3) { /* HC3 match finder */
- U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
- if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
- const BYTE *match;
- size_t currMl = 0;
- if ((!extDict) || matchIndex3 >= dictLimit) {
- match = base + matchIndex3;
- if (match[bestLength] == ip[bestLength])
- currMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex3;
- if (ZSTD_readMINMATCH(match, MINMATCH) ==
- ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
- currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
- }
-
- /* save best solution */
- if (currMl > bestLength) {
- bestLength = currMl;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
- matches[mnum].len = (U32)currMl;
- mnum++;
- if (currMl > ZSTD_OPT_NUM)
- goto update;
- if (ip + currMl == iLimit)
- goto update; /* best possible, and avoid read overflow*/
- }
- }
- }
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE *match;
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength]) {
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
- }
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- bestLength = matchLength;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
- matches[mnum].len = (U32)matchLength;
- mnum++;
- if (matchLength > ZSTD_OPT_NUM)
- break;
- if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
-
-update:
- zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
- return mnum;
-}
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
- const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
-}
-
-static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- switch (matchLengthSearch) {
- case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default:
- case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7:
- case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
-}
-
-static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- switch (matchLengthSearch) {
- case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default:
- case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7:
- case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-/*-*******************************
-* Optimal parser
-*********************************/
-FORCE_INLINE
-void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const BYTE *const prefixStart = base + ctx->dictLimit;
-
- const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
- const U32 sufficient_len = ctx->params.cParams.targetLength;
- const U32 mls = ctx->params.cParams.searchLength;
- const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t *opt = seqStorePtr->priceTable;
- ZSTD_match_t *matches = seqStorePtr->matchTable;
- const BYTE *inr;
- U32 offset, rep[ZSTD_REP_NUM];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
- ip += (ip == prefixStart);
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- rep[i] = ctx->rep[i];
- }
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- litlen = (U32)(ip - anchor);
-
- /* check repCode */
- {
- U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
- for (i = (ip == anchor); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
- (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
- best_off = i - (ip == anchor);
- do {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
-
- if (!last_pos && !match_num) {
- ip++;
- continue;
- }
-
- if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = 0 */
- best_mlen = (last_pos) ? last_pos : minMatch;
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
- mlen++;
- }
- }
-
- if (last_pos < minMatch) {
- ip++;
- continue;
- }
-
- /* initialize opt[0] */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- opt[0].rep[i] = rep[i];
- }
- opt[0].mlen = 1;
- opt[0].litlen = litlen;
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur - 1].mlen == 1) {
- litlen = opt[cur - 1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
- } else
- price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos)
- break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur - mlen].rep[1];
- opt[cur].rep[1] = opt[cur - mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
- opt[cur].rep[0] =
- ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- {
- U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
- (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen)
- best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
- best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
-
- if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
- matches[u].off - 1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- }
- }
- }
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur)
- break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos;) {
- u += opt[u].mlen;
- }
-
- for (cur = 0; cur < last_pos;) {
- mlen = opt[cur].mlen;
- if (mlen == 1) {
- ip++;
- cur++;
- continue;
- }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1)
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
- if (litLength == 0)
- offset--;
- }
-
- ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- anchor = ip = ip + mlen;
- }
- } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- ctx->repToConfirm[i] = rep[i];
- }
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const U32 lowestIndex = ctx->lowLimit;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const dictEnd = dictBase + dictLimit;
-
- const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
- const U32 sufficient_len = ctx->params.cParams.targetLength;
- const U32 mls = ctx->params.cParams.searchLength;
- const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t *opt = seqStorePtr->priceTable;
- ZSTD_match_t *matches = seqStorePtr->matchTable;
- const BYTE *inr;
-
- /* init */
- U32 offset, rep[ZSTD_REP_NUM];
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- rep[i] = ctx->rep[i];
- }
-
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
- ip += (ip == prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- U32 curr = (U32)(ip - base);
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- opt[0].litlen = (U32)(ip - anchor);
-
- /* check repCode */
- {
- U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
- for (i = (ip == anchor); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- const U32 repIndex = (U32)(curr - repCur);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if ((repCur > 0 && repCur <= (S32)curr) &&
- (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
- /* repcode detected we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- best_off = i - (ip == anchor);
- litlen = opt[0].litlen;
- do {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
-
- if (!last_pos && !match_num) {
- ip++;
- continue;
- }
-
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- opt[0].rep[i] = rep[i];
- }
- opt[0].mlen = 1;
-
- if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- best_mlen = (last_pos) ? last_pos : minMatch;
-
- /* set prices using matches at position = 0 */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
- litlen = opt[0].litlen;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
- mlen++;
- }
- }
-
- if (last_pos < minMatch) {
- ip++;
- continue;
- }
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur - 1].mlen == 1) {
- litlen = opt[cur - 1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
- } else
- price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos)
- break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur - mlen].rep[1];
- opt[cur].rep[1] = opt[cur - mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
- opt[cur].rep[0] =
- ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- {
- U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i = (mlen != 1); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- const U32 repIndex = (U32)(curr + cur - repCur);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
- (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen)
- best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
- best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
-
- if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
- matches[u].off - 1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- }
- }
- } /* for (cur = 1; cur <= last_pos; cur++) */
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur)
- break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos;) {
- u += opt[u].mlen;
- }
-
- for (cur = 0; cur < last_pos;) {
- mlen = opt[cur].mlen;
- if (mlen == 1) {
- ip++;
- cur++;
- continue;
- }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1)
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
-
- if (litLength == 0)
- offset--;
- }
-
- ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- anchor = ip = ip + mlen;
- }
- } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- ctx->repToConfirm[i] = rep[i];
- }
-
- /* Last Literals */
- {
- size_t lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-#endif /* ZSTD_OPT_H_91842398743 */